/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"

/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

/* Note that this is optimized for sequential allocation during translate.
   Up to and including filling in the forward link immediately.  We'll do
   proper termination of the end of the list after we finish translation.  */

static inline TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGContext *ctx = tcg_ctx;
    int oi = ctx->gen_next_op_idx;
    int ni = oi + 1;
    int pi = oi - 1;
    TCGOp *op = &ctx->gen_op_buf[oi];

    tcg_debug_assert(oi < OPC_BUF_SIZE);
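    /* gen_op_buf[0] appears to act as the list sentinel: aiming its prev
       link at the newest op keeps the tail reachable until the list is
       properly terminated at the end of translation. */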
    ctx->gen_op_buf[0].prev = oi;
    ctx->gen_next_op_idx = ni;

    memset(op, 0, offsetof(TCGOp, args));
    op->opc = opc;
    op->prev = pi;
    op->next = ni;

    return op;
}

void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
}

void tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
}

void tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
}

void tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
}

void tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
                 TCGArg a4, TCGArg a5)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
    op->args[4] = a5;
}

void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
                 TCGArg a4, TCGArg a5, TCGArg a6)
{
    TCGOp *op = tcg_emit_op(opc);
    op->args[0] = a1;
    op->args[1] = a2;
    op->args[2] = a3;
    op->args[3] = a4;
    op->args[4] = a5;
    op->args[5] = a6;
}

void tcg_gen_mb(TCGBar mb_type)
{
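    /* A barrier op is only needed when another vCPU may observe this one's
       memory accesses concurrently, which is what CF_PARALLEL indicates. */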
    if (tcg_ctx->tb_cflags & CF_PARALLEL) {
        tcg_gen_op1(INDEX_op_mb, mb_type);
    }
}

/* 32 bit ops */

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_add_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
        /* Don't recurse with tcg_gen_neg_i32. */
        tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg1);
        tcg_gen_sub_i32(ret, t0, arg2);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t0;
    /* Some cases can be optimized here. */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i32(ret, 0);
        return;
    case 0xffffffffu:
        tcg_gen_mov_i32(ret, arg1);
        return;
    case 0xffu:
        /* Don't recurse with tcg_gen_ext8u_i32. */
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i32(arg2);
    tcg_gen_and_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    if (arg2 == -1) {
        tcg_gen_movi_i32(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
        /* Don't recurse with tcg_gen_not_i32. */
        tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_xor_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shr_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sar_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
    }
}

void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_brcond_i32(cond, arg1, t0, l);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
                         TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i32(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i32(ret, 0);
    } else {
        tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
    }
}

void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                          TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_setcond_i32(cond, ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_mul_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_div_i32(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_rem_i32(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_divu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_remu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_andc_i32) {
        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_and_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nand_i32) {
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
    } else {
        tcg_gen_and_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nor_i32) {
        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_orc_i32) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_clz_i32) {
        tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_addi_i64(t2, t2, 32);
        tcg_gen_clz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_gen_subi_i32(ret, ret, 32);
    } else {
        gen_helper_clz_i32(ret, arg1, arg2);
    }
}

void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t = tcg_const_i32(arg2);
    tcg_gen_clz_i32(ret, arg1, t);
    tcg_temp_free_i32(t);
}

void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_ctz_i32) {
        tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctz_i64) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t1, arg1);
        tcg_gen_extu_i32_i64(t2, arg2);
        tcg_gen_ctz_i64(t1, t1, t2);
        tcg_gen_extrl_i64_i32(ret, t1);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
    } else if (TCG_TARGET_HAS_ctpop_i32
               || TCG_TARGET_HAS_ctpop_i64
               || TCG_TARGET_HAS_clz_i32
               || TCG_TARGET_HAS_clz_i64) {
        TCGv_i32 z, t = tcg_temp_new_i32();

        if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
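            /* ctz(x) == ctpop((x - 1) & ~x): subtracting one turns the
               trailing zeros into ones and clears the lowest set bit, and
               the andc keeps exactly that run of ones. */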
            tcg_gen_subi_i32(t, arg1, 1);
            tcg_gen_andc_i32(t, t, arg1);
            tcg_gen_ctpop_i32(t, t);
        } else {
            /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
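            /* x & -x isolates the lowest set bit; for nonzero x,
               ctz(x) == 31 - clz(x & -x), and 31 - n == n ^ 31 for
               n in [0, 31].  The x == 0 case is repaired by the
               movcond below. */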
            tcg_gen_neg_i32(t, arg1);
            tcg_gen_and_i32(t, t, arg1);
            tcg_gen_clzi_i32(t, t, 32);
            tcg_gen_xori_i32(t, t, 31);
        }
        z = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
        tcg_temp_free_i32(t);
        tcg_temp_free_i32(z);
    } else {
        gen_helper_ctz_i32(ret, arg1, arg2);
    }
}

void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
        /* This equivalence has the advantage of not requiring a fixup. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_subi_i32(t, arg1, 1);
        tcg_gen_andc_i32(t, t, arg1);
        tcg_gen_ctpop_i32(ret, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i32 t = tcg_const_i32(arg2);
        tcg_gen_ctz_i32(ret, arg1, t);
        tcg_temp_free_i32(t);
    }
}

void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_clz_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
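        /* clrsb counts redundant sign bits: xoring with a full sign mask
           turns copies of the sign bit into leading zeros, clz counts
           them, and the subtraction excludes the sign bit itself. */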
        tcg_gen_sari_i32(t, arg, 31);
        tcg_gen_xor_i32(t, t, arg);
        tcg_gen_clzi_i32(t, t, 32);
        tcg_gen_subi_i32(ret, t, 1);
        tcg_temp_free_i32(t);
    } else {
        gen_helper_clrsb_i32(ret, arg);
    }
}

void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
    if (TCG_TARGET_HAS_ctpop_i32) {
        tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
    } else if (TCG_TARGET_HAS_ctpop_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, arg1);
        tcg_gen_ctpop_i64(t, t);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);
    } else {
        gen_helper_ctpop_i32(ret, arg1);
    }
}

void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
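        /* rotl(x, n) == (x << n) | (x >> (32 - n)) */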
        tcg_gen_shl_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shr_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i32) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_rotl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shli_i32(t0, arg1, arg2);
        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shr_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shl_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
    }
}

void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint32_t mask;
    TCGv_i32 t1;

    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (len == 32) {
        tcg_gen_mov_i32(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
        return;
    }

    mask = (1u << len) - 1;
    t1 = tcg_temp_new_i32();

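    /* Insert the field by hand: clear the destination bits in arg1,
       then OR in the masked, shifted arg2. */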
    if (ofs + len < 32) {
        tcg_gen_andi_i32(t1, arg2, mask);
        tcg_gen_shli_i32(t1, t1, ofs);
    } else {
        tcg_gen_shli_i32(t1, arg2, ofs);
    }
    tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i32(ret, ret, t1);

    tcg_temp_free_i32(t1);
}

void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (ofs + len == 32) {
        tcg_gen_shli_i32(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i32
               && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
        tcg_temp_free_i32(zero);
    } else {
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_ext16u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_ext8u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext16u_i32(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext8u_i32(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        tcg_gen_shli_i32(ret, ret, ofs);
    }
}

void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_shri_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        return;
    }

    if (TCG_TARGET_HAS_extract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_ext16u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_ext8u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special case of 16,
       so that we get ext8u, ext16u.  */
    switch (len) {
    case 1 ... 8: case 16:
        tcg_gen_shri_i32(ret, arg, ofs);
        tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
        break;
    default:
        tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
        tcg_gen_shri_i32(ret, ret, 32 - len);
        break;
    }
}

void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_sari_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_gen_ext16s_i32(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i32(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_HAS_sextract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_ext16s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_ext8s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext16s_i32(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext8s_i32(ret, ret);
            return;
        }
        break;
    }

    tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
    tcg_gen_sari_i32(ret, ret, 32 - len);
}

void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
                         TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i32(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i32(ret, v2);
    } else if (TCG_TARGET_HAS_movcond_i32) {
        tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
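        /* setcond yields 0 or 1; negating gives an all-zeros or all-ones
           mask, so ret = (v1 & mask) | (v2 & ~mask) selects the value. */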
        tcg_gen_setcond_i32(cond, t0, c1, c2);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_and_i32(t1, v1, t0);
        tcg_gen_andc_i32(ret, v2, t0);
        tcg_gen_or_i32(ret, ret, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_add_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_sub2_i32) {
        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_sub_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative inputs. */
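        /* Interpreting a negative argN as unsigned overstates the product
           by the other argument times 2^32, so the signed high word is the
           unsigned one minus (arg2 if arg1 < 0) and minus (arg1 if
           arg2 < 0); the sari/and pairs build exactly those terms. */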
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_sari_i32(t3, arg2, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_and_i32(t3, t3, arg1);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_sub_i32(rh, rh, t3);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_ext_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative input for the signed arg1.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 24);
        tcg_gen_sari_i32(ret, ret, 24);
    }
}

void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 16);
        tcg_gen_sari_i32(ret, ret, 16);
    }
}

void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffffu);
    }
}

/* Note: we assume the two high bytes are set to zero */
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap16_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();

        tcg_gen_ext8u_i32(t0, arg);
        tcg_gen_shli_i32(t0, t0, 8);
        tcg_gen_shri_i32(ret, arg, 8);
        tcg_gen_or_i32(ret, ret, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap32_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

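        /* Route each byte to its mirrored lane (0->3, 1->2, 2->1, 3->0),
           masking to keep only the byte being moved. */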
        tcg_gen_shli_i32(t0, arg, 24);

        tcg_gen_andi_i32(t1, arg, 0x0000ff00);
        tcg_gen_shli_i32(t1, t1, 8);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 8);
        tcg_gen_andi_i32(t1, t1, 0x0000ff00);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 24);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

/* 64-bit ops */

#if TCG_TARGET_REG_BITS == 32
/* These are all inline for TCG_TARGET_REG_BITS == 64. */

void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}

void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
}

void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}

void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    /* Since arg2 and ret have different types,
       they cannot be the same temporary */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}

void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shl_i64(ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shr_i64(ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_sar_i64(ret, arg1, arg2);
}

void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i32();

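    /* Form the 64-bit product from 32-bit pieces: low*low gives the base,
       the two cross products fold into the high word only, and high*high
       would affect only bits above 64. */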
    tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
                      TCGV_LOW(arg1), TCGV_LOW(arg2));

    tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
    tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);

    tcg_gen_mov_i64(ret, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
}
#endif /* TCG_TARGET_REG_BITS == 32 */

void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_add_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
        /* Don't recurse with tcg_gen_neg_i64. */
        tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg1);
        tcg_gen_sub_i64(ret, t0, arg2);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    TCGv_i64 t0;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }

    /* Some cases can be optimized here. */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i64(ret, 0);
        return;
    case 0xffffffffffffffffull:
        tcg_gen_mov_i64(ret, arg1);
        return;
    case 0xffull:
        /* Don't recurse with tcg_gen_ext8u_i64. */
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffffffull:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i64(arg2);
    tcg_gen_and_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here. */
    if (arg2 == -1) {
        tcg_gen_movi_i64(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here. */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
        /* Don't recurse with tcg_gen_not_i64. */
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_xor_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
                                      unsigned c, bool right, bool arith)
{
    tcg_debug_assert(c < 64);
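    /* Double-word shift built from 32-bit parts: counts of 32 or more move
       whole words; smaller counts shift both halves and recover the bits
       crossing the word boundary with an opposite shift by 32 - c. */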
    if (c == 0) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(TCGV_LOW(ret), 0);
        }
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        if (right) {
            tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
            if (arith) {
                tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
            } else {
                tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
            }
            tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shr_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sar_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
                              TCGV_HIGH(arg1), TCGV_LOW(arg2),
                              TCGV_HIGH(arg2), cond, label_arg(l));
        } else {
            tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
                              label_arg(l));
        }
    }
}

void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_brcond_i64(cond, arg1, t0, l);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
                         TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i64(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
                             TCGV_LOW(arg1), TCGV_HIGH(arg1),
                             TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        } else {
            tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
        }
    }
}

void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
                          TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_setcond_i64(cond, ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_mul_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_div_i64(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_rem_i64(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_divu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_remu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext8s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 56);
        tcg_gen_sari_i64(ret, ret, 56);
    }
}

void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext16s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 48);
        tcg_gen_sari_i64(ret, ret, 48);
    }
}

void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext32s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 32);
        tcg_gen_sari_i64(ret, ret, 32);
    }
}

void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext8u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext16u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffu);
    }
}

void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext32u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffffffu);
    }
}

/* Note: we assume the six high bytes are set to zero */
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap16_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();

        tcg_gen_ext8u_i64(t0, arg);
        tcg_gen_shli_i64(t0, t0, 8);
        tcg_gen_shri_i64(ret, arg, 8);
        tcg_gen_or_i64(ret, ret, t0);
        tcg_temp_free_i64(t0);
    }
}

/* Note: we assume the four high bytes are set to zero */
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap32_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();

        tcg_gen_shli_i64(t0, arg, 24);
        tcg_gen_ext32u_i64(t0, t0);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
        tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
        tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_bswap64_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

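        /* Move each of the eight bytes to its mirrored position, masking
           before or after the shift so only the byte in flight survives. */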
        tcg_gen_shli_i64(t0, arg, 56);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 40);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_andi_i64(t1, arg, 0x00ff0000);
        tcg_gen_shli_i64(t1, t1, 24);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_andi_i64(t1, arg, 0xff000000);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0xff000000);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_andi_i64(t1, t1, 0x00ff0000);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 40);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 56);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_not_i64) {
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
    } else {
        tcg_gen_xori_i64(ret, arg, -1);
    }
}

void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_andc_i64) {
        tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_and_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_eqv_i64) {
        tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nand_i64) {
        tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
    } else {
        tcg_gen_and_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nor_i64) {
        tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
    } else {
        tcg_gen_or_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_clz_i64) {
        tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
    } else {
        gen_helper_clz_i64(ret, arg1, arg2);
    }
}

void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32
        && TCG_TARGET_HAS_clz_i32
        && arg2 <= 0xffffffffu) {
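        /* clz across two words: a non-zero high word decides by itself;
           otherwise the result is 32 + clz(low), with arg2 - 32 as the
           inner default so an all-zero input still yields arg2. */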
        TCGv_i32 t = tcg_const_i32((uint32_t)arg2 - 32);
        tcg_gen_clz_i32(t, TCGV_LOW(arg1), t);
        tcg_gen_addi_i32(t, t, 32);
        tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t = tcg_const_i64(arg2);
        tcg_gen_clz_i64(ret, arg1, t);
        tcg_temp_free_i64(t);
    }
}

void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_ctz_i64) {
        tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
        TCGv_i64 z, t = tcg_temp_new_i64();

        if (TCG_TARGET_HAS_ctpop_i64) {
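            /* Same identity as the 32-bit case:
               ctz(x) == ctpop((x - 1) & ~x). */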
1823 tcg_gen_subi_i64(t, arg1, 1);
1824 tcg_gen_andc_i64(t, t, arg1);
1825 tcg_gen_ctpop_i64(t, t);
1826 } else {
1827 /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */
1828 tcg_gen_neg_i64(t, arg1);
1829 tcg_gen_and_i64(t, t, arg1);
1830 tcg_gen_clzi_i64(t, t, 64);
1831 tcg_gen_xori_i64(t, t, 63);
1832 }
1833 z = tcg_const_i64(0);
1834 tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
1835 tcg_temp_free_i64(t);
1836 tcg_temp_free_i64(z);
0e28d006
RH
1837 } else {
1838 gen_helper_ctz_i64(ret, arg1, arg2);
1839 }
1840}
1841
1842void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
1843{
1844 if (TCG_TARGET_REG_BITS == 32
1845 && TCG_TARGET_HAS_ctz_i32
1846 && arg2 <= 0xffffffffu) {
1847 TCGv_i32 t32 = tcg_const_i32((uint32_t)arg2 - 32);
1848 tcg_gen_ctz_i32(t32, TCGV_HIGH(arg1), t32);
1849 tcg_gen_addi_i32(t32, t32, 32);
1850 tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
1851 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
1852 tcg_temp_free_i32(t32);
14e99210
RH
1853 } else if (!TCG_TARGET_HAS_ctz_i64
1854 && TCG_TARGET_HAS_ctpop_i64
1855 && arg2 == 64) {
1856 /* This equivalence has the advantage of not requiring a fixup. */
1857 TCGv_i64 t = tcg_temp_new_i64();
1858 tcg_gen_subi_i64(t, arg1, 1);
1859 tcg_gen_andc_i64(t, t, arg1);
1860 tcg_gen_ctpop_i64(ret, t);
1861 tcg_temp_free_i64(t);
0e28d006
RH
1862 } else {
1863 TCGv_i64 t64 = tcg_const_i64(arg2);
1864 tcg_gen_ctz_i64(ret, arg1, t64);
1865 tcg_temp_free_i64(t64);
1866 }
1867}
1868
1869void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
1870{
1871 if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
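        /* XORing arg with a copy of its sign bit folds both signs into one
           case: clz of the result is one more than the count of redundant
           sign bits.  E.g. arg = -2: t = 1, clz = 63, clrsb = 62;
           arg = 0: t = 0, clz = 64, clrsb = 63. */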
1872 TCGv_i64 t = tcg_temp_new_i64();
1873 tcg_gen_sari_i64(t, arg, 63);
1874 tcg_gen_xor_i64(t, t, arg);
1875 tcg_gen_clzi_i64(t, t, 64);
1876 tcg_gen_subi_i64(ret, t, 1);
1877 tcg_temp_free_i64(t);
1878 } else {
1879 gen_helper_clrsb_i64(ret, arg);
1880 }
1881}
1882
1883void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
1884{
1885 if (TCG_TARGET_HAS_ctpop_i64) {
1886 tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
1887 } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
1888 tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
1889 tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
1890 tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
1891 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
1892 } else {
1893 gen_helper_ctpop_i64(ret, arg1);
1894 }
1895}
1896
1897void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1898{
1899 if (TCG_TARGET_HAS_rot_i64) {
1900 tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
1901 } else {
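        /* No rotate op: expand as rotl(x, n) = (x << n) | (x >> (64 - n)). */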
1902 TCGv_i64 t0, t1;
1903 t0 = tcg_temp_new_i64();
1904 t1 = tcg_temp_new_i64();
1905 tcg_gen_shl_i64(t0, arg1, arg2);
1906 tcg_gen_subfi_i64(t1, 64, arg2);
1907 tcg_gen_shr_i64(t1, arg1, t1);
1908 tcg_gen_or_i64(ret, t0, t1);
1909 tcg_temp_free_i64(t0);
1910 tcg_temp_free_i64(t1);
1911 }
1912}
1913
1914void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
1915{
1916 tcg_debug_assert(arg2 < 64);
1917 /* some cases can be optimized here */
1918 if (arg2 == 0) {
1919 tcg_gen_mov_i64(ret, arg1);
1920 } else if (TCG_TARGET_HAS_rot_i64) {
1921 TCGv_i64 t0 = tcg_const_i64(arg2);
1922 tcg_gen_rotl_i64(ret, arg1, t0);
1923 tcg_temp_free_i64(t0);
1924 } else {
1925 TCGv_i64 t0, t1;
1926 t0 = tcg_temp_new_i64();
1927 t1 = tcg_temp_new_i64();
1928 tcg_gen_shli_i64(t0, arg1, arg2);
1929 tcg_gen_shri_i64(t1, arg1, 64 - arg2);
1930 tcg_gen_or_i64(ret, t0, t1);
1931 tcg_temp_free_i64(t0);
1932 tcg_temp_free_i64(t1);
1933 }
1934}
1935
1936void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1937{
1938 if (TCG_TARGET_HAS_rot_i64) {
1939 tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
1940 } else {
1941 TCGv_i64 t0, t1;
1942 t0 = tcg_temp_new_i64();
1943 t1 = tcg_temp_new_i64();
1944 tcg_gen_shr_i64(t0, arg1, arg2);
1945 tcg_gen_subfi_i64(t1, 64, arg2);
1946 tcg_gen_shl_i64(t1, arg1, t1);
1947 tcg_gen_or_i64(ret, t0, t1);
1948 tcg_temp_free_i64(t0);
1949 tcg_temp_free_i64(t1);
1950 }
1951}
1952
1953void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
1954{
1955 tcg_debug_assert(arg2 < 64);
1956 /* some cases can be optimized here */
1957 if (arg2 == 0) {
1958 tcg_gen_mov_i64(ret, arg1);
1959 } else {
1960 tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
1961 }
1962}
1963
1964void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
1965 unsigned int ofs, unsigned int len)
1966{
1967 uint64_t mask;
1968 TCGv_i64 t1;
1969
1970 tcg_debug_assert(ofs < 64);
1971 tcg_debug_assert(len > 0);
1972 tcg_debug_assert(len <= 64);
1973 tcg_debug_assert(ofs + len <= 64);
1974
1975 if (len == 64) {
1976 tcg_gen_mov_i64(ret, arg2);
1977 return;
1978 }
1979 if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
1980 tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
1981 return;
1982 }
1983
1984 if (TCG_TARGET_REG_BITS == 32) {
1985 if (ofs >= 32) {
1986 tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
1987 TCGV_LOW(arg2), ofs - 32, len);
1988 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
1989 return;
1990 }
1991 if (ofs + len <= 32) {
1992 tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
1993 TCGV_LOW(arg2), ofs, len);
1994 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
1995 return;
1996 }
1997 }
1998
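    /* Generic fallback:
       ret = (arg1 & ~(mask << ofs)) | ((arg2 & mask) << ofs),
       with mask = (1 << len) - 1; the AND on arg2 is skipped when the
       left shift alone already discards the bits above the field. */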
1999 mask = (1ull << len) - 1;
2000 t1 = tcg_temp_new_i64();
2001
2002 if (ofs + len < 64) {
2003 tcg_gen_andi_i64(t1, arg2, mask);
2004 tcg_gen_shli_i64(t1, t1, ofs);
2005 } else {
2006 tcg_gen_shli_i64(t1, arg2, ofs);
2007 }
2008 tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
2009 tcg_gen_or_i64(ret, ret, t1);
2010
2011 tcg_temp_free_i64(t1);
2012}
2013
2014void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
2015 unsigned int ofs, unsigned int len)
2016{
2017 tcg_debug_assert(ofs < 64);
2018 tcg_debug_assert(len > 0);
2019 tcg_debug_assert(len <= 64);
2020 tcg_debug_assert(ofs + len <= 64);
2021
2022 if (ofs + len == 64) {
2023 tcg_gen_shli_i64(ret, arg, ofs);
2024 } else if (ofs == 0) {
2025 tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
2026 } else if (TCG_TARGET_HAS_deposit_i64
2027 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
2028 TCGv_i64 zero = tcg_const_i64(0);
2029 tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
2030 tcg_temp_free_i64(zero);
2031 } else {
2032 if (TCG_TARGET_REG_BITS == 32) {
2033 if (ofs >= 32) {
2034 tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg),
2035 ofs - 32, len);
2036 tcg_gen_movi_i32(TCGV_LOW(ret), 0);
2037 return;
2038 }
2039 if (ofs + len <= 32) {
2040 tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
2041 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
2042 return;
2043 }
2044 }
2045 /* To help two-operand hosts we prefer to zero-extend first,
2046 which allows ARG to stay live. */
2047 switch (len) {
2048 case 32:
2049 if (TCG_TARGET_HAS_ext32u_i64) {
2050 tcg_gen_ext32u_i64(ret, arg);
2051 tcg_gen_shli_i64(ret, ret, ofs);
2052 return;
2053 }
2054 break;
2055 case 16:
2056 if (TCG_TARGET_HAS_ext16u_i64) {
2057 tcg_gen_ext16u_i64(ret, arg);
2058 tcg_gen_shli_i64(ret, ret, ofs);
2059 return;
2060 }
2061 break;
2062 case 8:
2063 if (TCG_TARGET_HAS_ext8u_i64) {
2064 tcg_gen_ext8u_i64(ret, arg);
2065 tcg_gen_shli_i64(ret, ret, ofs);
2066 return;
2067 }
2068 break;
2069 }
2070 /* Otherwise prefer zero-extension over AND for code size. */
2071 switch (ofs + len) {
2072 case 32:
2073 if (TCG_TARGET_HAS_ext32u_i64) {
2074 tcg_gen_shli_i64(ret, arg, ofs);
2075 tcg_gen_ext32u_i64(ret, ret);
2076 return;
2077 }
2078 break;
2079 case 16:
2080 if (TCG_TARGET_HAS_ext16u_i64) {
2081 tcg_gen_shli_i64(ret, arg, ofs);
2082 tcg_gen_ext16u_i64(ret, ret);
2083 return;
2084 }
2085 break;
2086 case 8:
2087 if (TCG_TARGET_HAS_ext8u_i64) {
2088 tcg_gen_shli_i64(ret, arg, ofs);
2089 tcg_gen_ext8u_i64(ret, ret);
2090 return;
2091 }
2092 break;
2093 }
2094 tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
2095 tcg_gen_shli_i64(ret, ret, ofs);
2096 }
2097}
2098
2099void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
2100 unsigned int ofs, unsigned int len)
2101{
2102 tcg_debug_assert(ofs < 64);
2103 tcg_debug_assert(len > 0);
2104 tcg_debug_assert(len <= 64);
2105 tcg_debug_assert(ofs + len <= 64);
2106
2107 /* Canonicalize certain special cases, even if extract is supported. */
2108 if (ofs + len == 64) {
2109 tcg_gen_shri_i64(ret, arg, 64 - len);
2110 return;
2111 }
2112 if (ofs == 0) {
2113 tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
2114 return;
2115 }
2116
2117 if (TCG_TARGET_REG_BITS == 32) {
2118 /* Look for a 32-bit extract within one of the two words. */
2119 if (ofs >= 32) {
2120 tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
2121 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
2122 return;
2123 }
2124 if (ofs + len <= 32) {
2125 tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
2126 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
2127 return;
2128 }
2129 /* The field is split across two words. One double-word
2130 shift is better than two double-word shifts. */
2131 goto do_shift_and;
2132 }
2133
2134 if (TCG_TARGET_HAS_extract_i64
2135 && TCG_TARGET_extract_i64_valid(ofs, len)) {
2136 tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
2137 return;
2138 }
2139
2140 /* Assume that zero-extension, if available, is cheaper than a shift. */
2141 switch (ofs + len) {
2142 case 32:
2143 if (TCG_TARGET_HAS_ext32u_i64) {
2144 tcg_gen_ext32u_i64(ret, arg);
2145 tcg_gen_shri_i64(ret, ret, ofs);
2146 return;
2147 }
2148 break;
2149 case 16:
2150 if (TCG_TARGET_HAS_ext16u_i64) {
2151 tcg_gen_ext16u_i64(ret, arg);
2152 tcg_gen_shri_i64(ret, ret, ofs);
2153 return;
2154 }
2155 break;
2156 case 8:
2157 if (TCG_TARGET_HAS_ext8u_i64) {
2158 tcg_gen_ext8u_i64(ret, arg);
2159 tcg_gen_shri_i64(ret, ret, ofs);
2160 return;
2161 }
2162 break;
2163 }
2164
2165 /* ??? Ideally we'd know what values are available for immediate AND.
2166 Assume that 8 bits are available, plus the special cases of 16 and 32,
2167 so that we get ext8u, ext16u, and ext32u. */
2168 switch (len) {
2169 case 1 ... 8: case 16: case 32:
2170 do_shift_and:
2171 tcg_gen_shri_i64(ret, arg, ofs);
2172 tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
2173 break;
2174 default:
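        /* Shift the field up against bit 63, then back down, letting
           the right shift zero-fill everything above it. */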
2175 tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
2176 tcg_gen_shri_i64(ret, ret, 64 - len);
2177 break;
2178 }
2179}
2180
2181void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
2182 unsigned int ofs, unsigned int len)
2183{
2184 tcg_debug_assert(ofs < 64);
2185 tcg_debug_assert(len > 0);
2186 tcg_debug_assert(len <= 64);
2187 tcg_debug_assert(ofs + len <= 64);
2188
2189 /* Canonicalize certain special cases, even if sextract is supported. */
2190 if (ofs + len == 64) {
2191 tcg_gen_sari_i64(ret, arg, 64 - len);
2192 return;
2193 }
2194 if (ofs == 0) {
2195 switch (len) {
2196 case 32:
2197 tcg_gen_ext32s_i64(ret, arg);
2198 return;
2199 case 16:
2200 tcg_gen_ext16s_i64(ret, arg);
2201 return;
2202 case 8:
2203 tcg_gen_ext8s_i64(ret, arg);
2204 return;
2205 }
2206 }
2207
2208 if (TCG_TARGET_REG_BITS == 32) {
2209 /* Look for a 32-bit extract within one of the two words. */
2210 if (ofs >= 32) {
2211 tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
2212 } else if (ofs + len <= 32) {
2213 tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
2214 } else if (ofs == 0) {
2215 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
2216 tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
2217 return;
2218 } else if (len > 32) {
2219 TCGv_i32 t = tcg_temp_new_i32();
2220 /* Extract the bits for the high word normally. */
2221 tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32);
2222 /* Shift the field down for the low part. */
2223 tcg_gen_shri_i64(ret, arg, ofs);
2224 /* Overwrite the shift into the high part. */
2225 tcg_gen_mov_i32(TCGV_HIGH(ret), t);
2226 tcg_temp_free_i32(t);
2227 return;
2228 } else {
2229 /* Shift the field down for the low part, such that the
2230 field sits at the MSB. */
2231 tcg_gen_shri_i64(ret, arg, ofs + len - 32);
2232 /* Shift the field down from the MSB, sign extending. */
2233 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len);
2234 }
2235 /* Sign-extend the field from 32 bits. */
2236 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
2237 return;
2238 }
2239
2240 if (TCG_TARGET_HAS_sextract_i64
2241 && TCG_TARGET_extract_i64_valid(ofs, len)) {
2242 tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
2243 return;
2244 }
2245
2246 /* Assume that sign-extension, if available, is cheaper than a shift. */
2247 switch (ofs + len) {
2248 case 32:
2249 if (TCG_TARGET_HAS_ext32s_i64) {
2250 tcg_gen_ext32s_i64(ret, arg);
2251 tcg_gen_sari_i64(ret, ret, ofs);
2252 return;
2253 }
2254 break;
2255 case 16:
2256 if (TCG_TARGET_HAS_ext16s_i64) {
2257 tcg_gen_ext16s_i64(ret, arg);
2258 tcg_gen_sari_i64(ret, ret, ofs);
2259 return;
2260 }
2261 break;
2262 case 8:
2263 if (TCG_TARGET_HAS_ext8s_i64) {
2264 tcg_gen_ext8s_i64(ret, arg);
2265 tcg_gen_sari_i64(ret, ret, ofs);
2266 return;
2267 }
2268 break;
2269 }
2270 switch (len) {
2271 case 32:
2272 if (TCG_TARGET_HAS_ext32s_i64) {
2273 tcg_gen_shri_i64(ret, arg, ofs);
2274 tcg_gen_ext32s_i64(ret, ret);
2275 return;
2276 }
2277 break;
2278 case 16:
2279 if (TCG_TARGET_HAS_ext16s_i64) {
2280 tcg_gen_shri_i64(ret, arg, ofs);
2281 tcg_gen_ext16s_i64(ret, ret);
2282 return;
2283 }
2284 break;
2285 case 8:
2286 if (TCG_TARGET_HAS_ext8s_i64) {
2287 tcg_gen_shri_i64(ret, arg, ofs);
2288 tcg_gen_ext8s_i64(ret, ret);
2289 return;
2290 }
2291 break;
2292 }
2293 tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
2294 tcg_gen_sari_i64(ret, ret, 64 - len);
2295}
2296
2297void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
2298 TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
2299{
2300 if (cond == TCG_COND_ALWAYS) {
2301 tcg_gen_mov_i64(ret, v1);
2302 } else if (cond == TCG_COND_NEVER) {
2303 tcg_gen_mov_i64(ret, v2);
2304 } else if (TCG_TARGET_REG_BITS == 32) {
2305 TCGv_i32 t0 = tcg_temp_new_i32();
2306 TCGv_i32 t1 = tcg_temp_new_i32();
2307 tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
2308 TCGV_LOW(c1), TCGV_HIGH(c1),
2309 TCGV_LOW(c2), TCGV_HIGH(c2), cond);
2310
2311 if (TCG_TARGET_HAS_movcond_i32) {
2312 tcg_gen_movi_i32(t1, 0);
2313 tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
2314 TCGV_LOW(v1), TCGV_LOW(v2));
2315 tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
2316 TCGV_HIGH(v1), TCGV_HIGH(v2));
2317 } else {
2318 tcg_gen_neg_i32(t0, t0);
2319
2320 tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
2321 tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
2322 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);
2323
2324 tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
2325 tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
2326 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
2327 }
2328 tcg_temp_free_i32(t0);
2329 tcg_temp_free_i32(t1);
2330 } else if (TCG_TARGET_HAS_movcond_i64) {
2331 tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
2332 } else {
2333 TCGv_i64 t0 = tcg_temp_new_i64();
2334 TCGv_i64 t1 = tcg_temp_new_i64();
2335 tcg_gen_setcond_i64(cond, t0, c1, c2);
2336 tcg_gen_neg_i64(t0, t0);
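        /* t0 becomes all-ones when the condition holds, all-zeros
           otherwise, so ret = (v1 & t0) | (v2 & ~t0). */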
2337 tcg_gen_and_i64(t1, v1, t0);
2338 tcg_gen_andc_i64(ret, v2, t0);
2339 tcg_gen_or_i64(ret, ret, t1);
2340 tcg_temp_free_i64(t0);
2341 tcg_temp_free_i64(t1);
2342 }
2343}
2344
2345void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
2346 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
2347{
2348 if (TCG_TARGET_HAS_add2_i64) {
2349 tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
2350 } else {
2351 TCGv_i64 t0 = tcg_temp_new_i64();
2352 TCGv_i64 t1 = tcg_temp_new_i64();
2353 tcg_gen_add_i64(t0, al, bl);
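        /* The low addition carries out iff it wrapped: (al + bl) < al. */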
2354 tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
2355 tcg_gen_add_i64(rh, ah, bh);
2356 tcg_gen_add_i64(rh, rh, t1);
2357 tcg_gen_mov_i64(rl, t0);
2358 tcg_temp_free_i64(t0);
2359 tcg_temp_free_i64(t1);
2360 }
2361}
2362
2363void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
2364 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
2365{
2366 if (TCG_TARGET_HAS_sub2_i64) {
2367 tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
2368 } else {
2369 TCGv_i64 t0 = tcg_temp_new_i64();
2370 TCGv_i64 t1 = tcg_temp_new_i64();
2371 tcg_gen_sub_i64(t0, al, bl);
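        /* The low subtraction borrows iff al < bl (unsigned). */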
2372 tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
2373 tcg_gen_sub_i64(rh, ah, bh);
2374 tcg_gen_sub_i64(rh, rh, t1);
2375 tcg_gen_mov_i64(rl, t0);
2376 tcg_temp_free_i64(t0);
2377 tcg_temp_free_i64(t1);
2378 }
2379}
2380
2381void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
2382{
2383 if (TCG_TARGET_HAS_mulu2_i64) {
2384 tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
2385 } else if (TCG_TARGET_HAS_muluh_i64) {
2386 TCGv_i64 t = tcg_temp_new_i64();
2387 tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
2388 tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
2389 tcg_gen_mov_i64(rl, t);
2390 tcg_temp_free_i64(t);
2391 } else {
2392 TCGv_i64 t0 = tcg_temp_new_i64();
2393 tcg_gen_mul_i64(t0, arg1, arg2);
2394 gen_helper_muluh_i64(rh, arg1, arg2);
2395 tcg_gen_mov_i64(rl, t0);
2396 tcg_temp_free_i64(t0);
2397 }
2398}
2399
2400void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
2401{
2402 if (TCG_TARGET_HAS_muls2_i64) {
2403 tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
2404 } else if (TCG_TARGET_HAS_mulsh_i64) {
2405 TCGv_i64 t = tcg_temp_new_i64();
2406 tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
2407 tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
2408 tcg_gen_mov_i64(rl, t);
2409 tcg_temp_free_i64(t);
2410 } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
2411 TCGv_i64 t0 = tcg_temp_new_i64();
2412 TCGv_i64 t1 = tcg_temp_new_i64();
2413 TCGv_i64 t2 = tcg_temp_new_i64();
2414 TCGv_i64 t3 = tcg_temp_new_i64();
2415 tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
2416 /* Adjust for negative inputs. */
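        /* Reading the operands as signed subtracts 2^64 * arg2 from the
           unsigned product when arg1 < 0, and 2^64 * arg1 when arg2 < 0;
           the sari/and pairs below form those conditional terms. */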
2417 tcg_gen_sari_i64(t2, arg1, 63);
2418 tcg_gen_sari_i64(t3, arg2, 63);
2419 tcg_gen_and_i64(t2, t2, arg2);
2420 tcg_gen_and_i64(t3, t3, arg1);
2421 tcg_gen_sub_i64(rh, t1, t2);
2422 tcg_gen_sub_i64(rh, rh, t3);
2423 tcg_gen_mov_i64(rl, t0);
2424 tcg_temp_free_i64(t0);
2425 tcg_temp_free_i64(t1);
2426 tcg_temp_free_i64(t2);
2427 tcg_temp_free_i64(t3);
2428 } else {
2429 TCGv_i64 t0 = tcg_temp_new_i64();
2430 tcg_gen_mul_i64(t0, arg1, arg2);
2431 gen_helper_mulsh_i64(rh, arg1, arg2);
2432 tcg_gen_mov_i64(rl, t0);
2433 tcg_temp_free_i64(t0);
2434 }
2435}
2436
2437void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
2438{
2439 TCGv_i64 t0 = tcg_temp_new_i64();
2440 TCGv_i64 t1 = tcg_temp_new_i64();
2441 TCGv_i64 t2 = tcg_temp_new_i64();
2442 tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
2443 /* Adjust for negative input for the signed arg1. */
2444 tcg_gen_sari_i64(t2, arg1, 63);
2445 tcg_gen_and_i64(t2, t2, arg2);
2446 tcg_gen_sub_i64(rh, t1, t2);
2447 tcg_gen_mov_i64(rl, t0);
2448 tcg_temp_free_i64(t0);
2449 tcg_temp_free_i64(t1);
2450 tcg_temp_free_i64(t2);
2451}
2452
2453/* Size changing operations. */
2454
2455void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
2456{
2457 if (TCG_TARGET_REG_BITS == 32) {
2458 tcg_gen_mov_i32(ret, TCGV_LOW(arg));
2459 } else if (TCG_TARGET_HAS_extrl_i64_i32) {
2460 tcg_gen_op2(INDEX_op_extrl_i64_i32,
2461 tcgv_i32_arg(ret), tcgv_i64_arg(arg));
2462 } else {
2463 tcg_gen_mov_i32(ret, (TCGv_i32)arg);
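        /* Without a dedicated extrl op, the 64-bit temp is reused
           directly as an i32 view: the 32-bit mov reads only its low
           half. */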
2464 }
2465}
2466
2467void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
2468{
2469 if (TCG_TARGET_REG_BITS == 32) {
2470 tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
2471 } else if (TCG_TARGET_HAS_extrh_i64_i32) {
2472 tcg_gen_op2(INDEX_op_extrh_i64_i32,
2473 tcgv_i32_arg(ret), tcgv_i64_arg(arg));
2474 } else {
2475 TCGv_i64 t = tcg_temp_new_i64();
2476 tcg_gen_shri_i64(t, arg, 32);
2477 tcg_gen_mov_i32(ret, (TCGv_i32)t);
2478 tcg_temp_free_i64(t);
2479 }
2480}
2481
2482void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
2483{
2484 if (TCG_TARGET_REG_BITS == 32) {
2485 tcg_gen_mov_i32(TCGV_LOW(ret), arg);
2486 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
2487 } else {
2488 tcg_gen_op2(INDEX_op_extu_i32_i64,
2489 tcgv_i64_arg(ret), tcgv_i32_arg(arg));
2490 }
2491}
2492
2493void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
2494{
2495 if (TCG_TARGET_REG_BITS == 32) {
2496 tcg_gen_mov_i32(TCGV_LOW(ret), arg);
2497 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
2498 } else {
2499 tcg_gen_op2(INDEX_op_ext_i32_i64,
2500 tcgv_i64_arg(ret), tcgv_i32_arg(arg));
2501 }
2502}
2503
2504void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
2505{
2506 TCGv_i64 tmp;
2507
2508 if (TCG_TARGET_REG_BITS == 32) {
2509 tcg_gen_mov_i32(TCGV_LOW(dest), low);
2510 tcg_gen_mov_i32(TCGV_HIGH(dest), high);
2511 return;
2512 }
2513
2514 tmp = tcg_temp_new_i64();
2515 /* These extensions are only needed for type correctness.
2516 We may be able to do better given target specific information. */
2517 tcg_gen_extu_i32_i64(tmp, high);
2518 tcg_gen_extu_i32_i64(dest, low);
2519 /* If deposit is available, use it. Otherwise use the extra
2520 knowledge that we have of the zero-extensions above. */
2521 if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
2522 tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
2523 } else {
2524 tcg_gen_shli_i64(tmp, tmp, 32);
2525 tcg_gen_or_i64(dest, dest, tmp);
2526 }
2527 tcg_temp_free_i64(tmp);
2528}
2529
2530void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
2531{
2532 if (TCG_TARGET_REG_BITS == 32) {
2533 tcg_gen_mov_i32(lo, TCGV_LOW(arg));
2534 tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
2535 } else {
2536 tcg_gen_extrl_i64_i32(lo, arg);
2537 tcg_gen_extrh_i64_i32(hi, arg);
2538 }
2539}
2540
2541void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
2542{
2543 tcg_gen_ext32u_i64(lo, arg);
2544 tcg_gen_shri_i64(hi, arg, 32);
2545}
2546
2547/* QEMU specific operations. */
2548
2549void tcg_gen_goto_tb(unsigned idx)
2550{
2551 /* We only support two chained exits. */
2552 tcg_debug_assert(idx <= 1);
2553#ifdef CONFIG_DEBUG_TCG
2554 /* Verify that we haven't seen this numbered exit before. */
2555 tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
2556 tcg_ctx->goto_tb_issue_mask |= 1 << idx;
2557#endif
2558 tcg_gen_op1i(INDEX_op_goto_tb, idx);
2559}
2560
2561void tcg_gen_lookup_and_goto_ptr(void)
2562{
2563 if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
2564 TCGv_ptr ptr = tcg_temp_new_ptr();
2565 gen_helper_lookup_tb_ptr(ptr, tcg_ctx->tcg_env);
2566 tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
2567 tcg_temp_free_ptr(ptr);
2568 } else {
2569 tcg_gen_exit_tb(0);
2570 }
2571}
2572
2573static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
2574{
2575 /* Trigger the asserts within as early as possible. */
2576 (void)get_alignment_bits(op);
2577
2578 switch (op & MO_SIZE) {
2579 case MO_8:
2580 op &= ~MO_BSWAP;
2581 break;
2582 case MO_16:
2583 break;
2584 case MO_32:
2585 if (!is64) {
2586 op &= ~MO_SIGN;
2587 }
2588 break;
2589 case MO_64:
2590 if (!is64) {
2591 tcg_abort();
2592 }
2593 break;
2594 }
2595 if (st) {
2596 op &= ~MO_SIGN;
2597 }
2598 return op;
2599}
2600
2601static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
2602 TCGMemOp memop, TCGArg idx)
2603{
2604 TCGMemOpIdx oi = make_memop_idx(memop, idx);
2605#if TARGET_LONG_BITS == 32
2606 tcg_gen_op3i_i32(opc, val, addr, oi);
2607#else
2608 if (TCG_TARGET_REG_BITS == 32) {
2609 tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
2610 } else {
2611 tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi);
2612 }
2613#endif
2614}
2615
2616static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
2617 TCGMemOp memop, TCGArg idx)
2618{
2619 TCGMemOpIdx oi = make_memop_idx(memop, idx);
2620#if TARGET_LONG_BITS == 32
2621 if (TCG_TARGET_REG_BITS == 32) {
2622 tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
2623 } else {
2624 tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi);
2625 }
2626#else
2627 if (TCG_TARGET_REG_BITS == 32) {
2628 tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
2629 TCGV_LOW(addr), TCGV_HIGH(addr), oi);
2630 } else {
2631 tcg_gen_op3i_i64(opc, val, addr, oi);
2632 }
2633#endif
2634}
2635
2636static void tcg_gen_req_mo(TCGBar type)
2637{
2638#ifdef TCG_GUEST_DEFAULT_MO
2639 type &= TCG_GUEST_DEFAULT_MO;
2640#endif
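    /* Drop the orderings the host memory model already guarantees;
       an explicit barrier is needed only for whatever remains. */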
2641 type &= ~TCG_TARGET_DEFAULT_MO;
2642 if (type) {
2643 tcg_gen_mb(type | TCG_BAR_SC);
2644 }
2645}
2646
2647void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2648{
2649 tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2650 memop = tcg_canonicalize_memop(memop, 0, 0);
2651 trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
2652 addr, trace_mem_get_info(memop, 0));
2653 gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
2654}
2655
2656void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2657{
2658 tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2659 memop = tcg_canonicalize_memop(memop, 0, 1);
2660 trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
2661 addr, trace_mem_get_info(memop, 1));
2662 gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
2663}
2664
2665void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2666{
2667 tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2668 if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
2669 tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
2670 if (memop & MO_SIGN) {
2671 tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
2672 } else {
2673 tcg_gen_movi_i32(TCGV_HIGH(val), 0);
2674 }
2675 return;
2676 }
2677
2678 memop = tcg_canonicalize_memop(memop, 1, 0);
2679 trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
2680 addr, trace_mem_get_info(memop, 0));
2681 gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
2682}
2683
2684void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2685{
2686 tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2687 if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
2688 tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
2689 return;
2690 }
2691
2692 memop = tcg_canonicalize_memop(memop, 1, 1);
2693 trace_guest_mem_before_tcg(tcg_ctx->cpu, tcg_ctx->tcg_env,
2694 addr, trace_mem_get_info(memop, 1));
2695 gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
2696}
2697
2698static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
2699{
2700 switch (opc & MO_SSIZE) {
2701 case MO_SB:
2702 tcg_gen_ext8s_i32(ret, val);
2703 break;
2704 case MO_UB:
2705 tcg_gen_ext8u_i32(ret, val);
2706 break;
2707 case MO_SW:
2708 tcg_gen_ext16s_i32(ret, val);
2709 break;
2710 case MO_UW:
2711 tcg_gen_ext16u_i32(ret, val);
2712 break;
2713 default:
2714 tcg_gen_mov_i32(ret, val);
2715 break;
2716 }
2717}
2718
2719static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
2720{
2721 switch (opc & MO_SSIZE) {
2722 case MO_SB:
2723 tcg_gen_ext8s_i64(ret, val);
2724 break;
2725 case MO_UB:
2726 tcg_gen_ext8u_i64(ret, val);
2727 break;
2728 case MO_SW:
2729 tcg_gen_ext16s_i64(ret, val);
2730 break;
2731 case MO_UW:
2732 tcg_gen_ext16u_i64(ret, val);
2733 break;
2734 case MO_SL:
2735 tcg_gen_ext32s_i64(ret, val);
2736 break;
2737 case MO_UL:
2738 tcg_gen_ext32u_i64(ret, val);
2739 break;
2740 default:
2741 tcg_gen_mov_i64(ret, val);
2742 break;
2743 }
2744}
2745
2746#ifdef CONFIG_SOFTMMU
2747typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
2748 TCGv_i32, TCGv_i32, TCGv_i32);
2749typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
2750 TCGv_i64, TCGv_i64, TCGv_i32);
2751typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
2752 TCGv_i32, TCGv_i32);
2753typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
2754 TCGv_i64, TCGv_i32);
2755#else
2756typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
2757typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
2758typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
2759typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
2760#endif
2761
2762#ifdef CONFIG_ATOMIC64
2763# define WITH_ATOMIC64(X) X,
2764#else
2765# define WITH_ATOMIC64(X)
2766#endif
2767
2768static void * const table_cmpxchg[16] = {
2769 [MO_8] = gen_helper_atomic_cmpxchgb,
2770 [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
2771 [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
2772 [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
2773 [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
2774 WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
2775 WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
2776};
2777
2778void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
2779 TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
2780{
2781 memop = tcg_canonicalize_memop(memop, 0, 0);
2782
2783 if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
2784 TCGv_i32 t1 = tcg_temp_new_i32();
2785 TCGv_i32 t2 = tcg_temp_new_i32();
2786
2787 tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
2788
2789 tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
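        /* Non-parallel emulation: t2 currently holds the extended compare
           value; the movcond picks newv on a match, else the loaded t1,
           and that value is stored back. */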
2790 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
2791 tcg_gen_qemu_st_i32(t2, addr, idx, memop);
2792 tcg_temp_free_i32(t2);
2793
2794 if (memop & MO_SIGN) {
2795 tcg_gen_ext_i32(retv, t1, memop);
2796 } else {
2797 tcg_gen_mov_i32(retv, t1);
2798 }
2799 tcg_temp_free_i32(t1);
2800 } else {
2801 gen_atomic_cx_i32 gen;
2802
2803 gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
2804 tcg_debug_assert(gen != NULL);
2805
2806#ifdef CONFIG_SOFTMMU
2807 {
2808 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
2809 gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv, oi);
2810 tcg_temp_free_i32(oi);
2811 }
2812#else
2813 gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv);
2814#endif
2815
2816 if (memop & MO_SIGN) {
2817 tcg_gen_ext_i32(retv, retv, memop);
2818 }
2819 }
2820}
2821
2822void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
2823 TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
2824{
2825 memop = tcg_canonicalize_memop(memop, 1, 0);
2826
2827 if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
2828 TCGv_i64 t1 = tcg_temp_new_i64();
2829 TCGv_i64 t2 = tcg_temp_new_i64();
2830
2831 tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
2832
2833 tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
2834 tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
2835 tcg_gen_qemu_st_i64(t2, addr, idx, memop);
2836 tcg_temp_free_i64(t2);
2837
2838 if (memop & MO_SIGN) {
2839 tcg_gen_ext_i64(retv, t1, memop);
2840 } else {
2841 tcg_gen_mov_i64(retv, t1);
2842 }
2843 tcg_temp_free_i64(t1);
2844 } else if ((memop & MO_SIZE) == MO_64) {
2845#ifdef CONFIG_ATOMIC64
2846 gen_atomic_cx_i64 gen;
2847
2848 gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
2849 tcg_debug_assert(gen != NULL);
2850
2851#ifdef CONFIG_SOFTMMU
2852 {
2853 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
2854 gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv, oi);
2855 tcg_temp_free_i32(oi);
2856 }
2857#else
b1311c4a 2858 gen(retv, tcg_ctx->tcg_env, addr, cmpv, newv);
2859#endif
2860#else
2861 gen_helper_exit_atomic(tcg_ctx->tcg_env);
2862 /* Produce a result, so that we have a well-formed opcode stream
2863 with respect to uses of the result in the (dead) code following. */
2864 tcg_gen_movi_i64(retv, 0);
2865#endif /* CONFIG_ATOMIC64 */
2866 } else {
2867 TCGv_i32 c32 = tcg_temp_new_i32();
2868 TCGv_i32 n32 = tcg_temp_new_i32();
2869 TCGv_i32 r32 = tcg_temp_new_i32();
2870
2871 tcg_gen_extrl_i64_i32(c32, cmpv);
2872 tcg_gen_extrl_i64_i32(n32, newv);
2873 tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
2874 tcg_temp_free_i32(c32);
2875 tcg_temp_free_i32(n32);
2876
2877 tcg_gen_extu_i32_i64(retv, r32);
2878 tcg_temp_free_i32(r32);
2879
2880 if (memop & MO_SIGN) {
2881 tcg_gen_ext_i64(retv, retv, memop);
2882 }
2883 }
2884}
2885
2886static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
2887 TCGArg idx, TCGMemOp memop, bool new_val,
2888 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
2889{
2890 TCGv_i32 t1 = tcg_temp_new_i32();
2891 TCGv_i32 t2 = tcg_temp_new_i32();
2892
2893 memop = tcg_canonicalize_memop(memop, 0, 0);
2894
2895 tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
2896 gen(t2, t1, val);
2897 tcg_gen_qemu_st_i32(t2, addr, idx, memop);
2898
2899 tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
2900 tcg_temp_free_i32(t1);
2901 tcg_temp_free_i32(t2);
2902}
2903
2904static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
2905 TCGArg idx, TCGMemOp memop, void * const table[])
2906{
2907 gen_atomic_op_i32 gen;
2908
2909 memop = tcg_canonicalize_memop(memop, 0, 0);
2910
2911 gen = table[memop & (MO_SIZE | MO_BSWAP)];
2912 tcg_debug_assert(gen != NULL);
2913
2914#ifdef CONFIG_SOFTMMU
2915 {
2916 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
2917 gen(ret, tcg_ctx->tcg_env, addr, val, oi);
2918 tcg_temp_free_i32(oi);
2919 }
2920#else
2921 gen(ret, tcg_ctx->tcg_env, addr, val);
2922#endif
2923
2924 if (memop & MO_SIGN) {
2925 tcg_gen_ext_i32(ret, ret, memop);
2926 }
2927}
2928
2929static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
2930 TCGArg idx, TCGMemOp memop, bool new_val,
2931 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
2932{
2933 TCGv_i64 t1 = tcg_temp_new_i64();
2934 TCGv_i64 t2 = tcg_temp_new_i64();
2935
2936 memop = tcg_canonicalize_memop(memop, 1, 0);
2937
2938 tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
2939 gen(t2, t1, val);
2940 tcg_gen_qemu_st_i64(t2, addr, idx, memop);
2941
2942 tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
2943 tcg_temp_free_i64(t1);
2944 tcg_temp_free_i64(t2);
2945}
2946
2947static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
2948 TCGArg idx, TCGMemOp memop, void * const table[])
2949{
2950 memop = tcg_canonicalize_memop(memop, 1, 0);
2951
2952 if ((memop & MO_SIZE) == MO_64) {
2953#ifdef CONFIG_ATOMIC64
2954 gen_atomic_op_i64 gen;
2955
2956 gen = table[memop & (MO_SIZE | MO_BSWAP)];
2957 tcg_debug_assert(gen != NULL);
2958
2959#ifdef CONFIG_SOFTMMU
2960 {
2961 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
2962 gen(ret, tcg_ctx->tcg_env, addr, val, oi);
2963 tcg_temp_free_i32(oi);
2964 }
2965#else
2966 gen(ret, tcg_ctx->tcg_env, addr, val);
2967#endif
2968#else
2969 gen_helper_exit_atomic(tcg_ctx->tcg_env);
2970 /* Produce a result, so that we have a well-formed opcode stream
2971 with respect to uses of the result in the (dead) code following. */
2972 tcg_gen_movi_i64(ret, 0);
df79b996 2973#endif /* CONFIG_ATOMIC64 */
2974 } else {
2975 TCGv_i32 v32 = tcg_temp_new_i32();
2976 TCGv_i32 r32 = tcg_temp_new_i32();
2977
2978 tcg_gen_extrl_i64_i32(v32, val);
2979 do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
2980 tcg_temp_free_i32(v32);
2981
2982 tcg_gen_extu_i32_i64(ret, r32);
2983 tcg_temp_free_i32(r32);
2984
2985 if (memop & MO_SIGN) {
2986 tcg_gen_ext_i64(ret, ret, memop);
2987 }
2988 }
2989}
2990
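/* For each NAME, the macro below expands to a per-size helper table plus
   i32/i64 front ends: under CF_PARALLEL the out-of-line atomic helper is
   called, otherwise a plain load/op/store sequence is emitted; NEW selects
   whether the new or the old memory value is returned. */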
2991#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \
2992static void * const table_##NAME[16] = { \
2993 [MO_8] = gen_helper_atomic_##NAME##b, \
2994 [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \
2995 [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \
2996 [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \
2997 [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \
2998 WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \
2999 WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \
3000}; \
3001void tcg_gen_atomic_##NAME##_i32 \
3002 (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
3003{ \
3004 if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
3005 do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
3006 } else { \
3007 do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
3008 tcg_gen_##OP##_i32); \
3009 } \
3010} \
3011void tcg_gen_atomic_##NAME##_i64 \
3012 (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
3013{ \
3014 if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
3015 do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
3016 } else { \
3017 do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
3018 tcg_gen_##OP##_i64); \
3019 } \
3020}
3021
3022GEN_ATOMIC_HELPER(fetch_add, add, 0)
3023GEN_ATOMIC_HELPER(fetch_and, and, 0)
3024GEN_ATOMIC_HELPER(fetch_or, or, 0)
3025GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
3026
3027GEN_ATOMIC_HELPER(add_fetch, add, 1)
3028GEN_ATOMIC_HELPER(and_fetch, and, 1)
3029GEN_ATOMIC_HELPER(or_fetch, or, 1)
3030GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
3031
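/* xchg reuses the fetch-op machinery with op(old, new) = new: the
   "operation" below simply discards its first operand. */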
3032static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
3033{
3034 tcg_gen_mov_i32(r, b);
3035}
3036
3037static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
3038{
3039 tcg_gen_mov_i64(r, b);
3040}
3041
3042GEN_ATOMIC_HELPER(xchg, mov2, 0)
3043
3044#undef GEN_ATOMIC_HELPER