/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
#include "trace-tcg.h"
#include "trace/mem.h"

/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

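/* Ops are chained through the TCGOp prev/next indices into a doubly
   linked list; gen_op_buf[0] acts as the list sentinel, so its backward
   link is pointed at the newest op each time one is emitted.  */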
/* Note that this is optimized for sequential allocation during translate.
   Up to and including filling in the forward link immediately.  We'll do
   proper termination of the end of the list after we finish translation.  */

static void tcg_emit_op(TCGContext *ctx, TCGOpcode opc, int args)
{
    int oi = ctx->gen_next_op_idx;
    int ni = oi + 1;
    int pi = oi - 1;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    ctx->gen_op_buf[0].prev = oi;
    ctx->gen_next_op_idx = ni;

    ctx->gen_op_buf[oi] = (TCGOp){
        .opc = opc,
        .args = args,
        .prev = pi,
        .next = ni
    };
}

void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 1 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 1;
    ctx->gen_opparam_buf[pi] = a1;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op2(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 2 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 2;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 3 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 3;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op4(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3, TCGArg a4)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 4 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 4;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op5(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 5 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 5;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;
    ctx->gen_opparam_buf[pi + 4] = a5;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
                 TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 6 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 6;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;
    ctx->gen_opparam_buf[pi + 4] = a5;
    ctx->gen_opparam_buf[pi + 5] = a6;

    tcg_emit_op(ctx, opc, pi);
}

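/* A barrier op is only required when translating for a context in which
   other vCPUs may run in parallel; under serialized round-robin execution
   it would have no observable effect, so it is elided.  */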
void tcg_gen_mb(TCGBar mb_type)
{
    if (parallel_cpus) {
        tcg_gen_op1(&tcg_ctx, INDEX_op_mb, mb_type);
    }
}

/* 32-bit ops */

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_add_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
        /* Don't recurse with tcg_gen_neg_i32.  */
        tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg1);
        tcg_gen_sub_i32(ret, t0, arg2);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

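/* AND with a constant is strength-reduced where possible: 0 and ~0 become
   a move, and the masks 0xff and 0xffff become the matching zero-extension
   op when the host provides one, e.g. tcg_gen_andi_i32(r, a, 0xff) emits
   ext8u_i32 rather than a real AND.  */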
void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t0;
    /* Some cases can be optimized here.  */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i32(ret, 0);
        return;
    case 0xffffffffu:
        tcg_gen_mov_i32(ret, arg1);
        return;
    case 0xffu:
        /* Don't recurse with tcg_gen_ext8u_i32.  */
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i32(arg2);
    tcg_gen_and_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here.  */
    if (arg2 == -1) {
        tcg_gen_movi_i32(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here.  */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
        /* Don't recurse with tcg_gen_not_i32.  */
        tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_xor_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shr_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sar_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
    }
}

void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_brcond_i32(cond, arg1, t0, l);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
                         TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i32(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i32(ret, 0);
    } else {
        tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
    }
}

void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                          TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_setcond_i32(cond, ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_mul_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

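/* The div2 opcode family divides a double-width dividend: the inputs are
   the low word (arg1), a high word holding arg1's sign extension (or zero
   for the unsigned variants), and the divisor; the two outputs are the
   quotient and the remainder.  */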
void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_div_i32(ret, arg1, arg2);
    }
}

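/* When only division is available, the remainder is recovered as
   arg1 - (arg1 / arg2) * arg2.  */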
void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_rem_i32(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_divu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_remu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_andc_i32) {
        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_and_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nand_i32) {
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
    } else {
        tcg_gen_and_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nor_i32) {
        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_orc_i32) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

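/* Without a rotate opcode, rotates are composed from two shifts:
   rol(x, n) == (x << n) | (x >> (32 - n)).  */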
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shl_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shr_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i32) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_rotl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shli_i32(t0, arg1, arg2);
        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shr_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shl_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
    }
}

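/* The fallback below inserts ARG2 into ARG1 with mask-shift-or.  For
   example, tcg_gen_deposit_i32(ret, a, b, 8, 8) replaces bits [15:8]
   of A with the low byte of B:
       mask = 0xff;
       ret  = (a & ~(mask << 8)) | ((b & mask) << 8);  */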
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint32_t mask;
    TCGv_i32 t1;

    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (len == 32) {
        tcg_gen_mov_i32(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
        return;
    }

    mask = (1u << len) - 1;
    t1 = tcg_temp_new_i32();

    if (ofs + len < 32) {
        tcg_gen_andi_i32(t1, arg2, mask);
        tcg_gen_shli_i32(t1, t1, ofs);
    } else {
        tcg_gen_shli_i32(t1, arg2, ofs);
    }
    tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i32(ret, ret, t1);

    tcg_temp_free_i32(t1);
}

void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (ofs + len == 32) {
        tcg_gen_shli_i32(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i32
               && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
        tcg_temp_free_i32(zero);
    } else {
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live.  */
        switch (len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_ext16u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_ext8u_i32(ret, arg);
                tcg_gen_shli_i32(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size.  */
        switch (ofs + len) {
        case 16:
            if (TCG_TARGET_HAS_ext16u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext16u_i32(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i32) {
                tcg_gen_shli_i32(ret, arg, ofs);
                tcg_gen_ext8u_i32(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        tcg_gen_shli_i32(ret, ret, ofs);
    }
}

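/* Unsigned field extraction.  Without a dedicated opcode this becomes
   either (arg >> ofs) & ((1u << len) - 1) when the mask is likely to be
   encodable, or the two-shift form (arg << (32 - len - ofs)) >> (32 - len).  */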
void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_shri_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        return;
    }

    if (TCG_TARGET_HAS_extract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_ext16u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_ext8u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special case of 16,
       so that we get ext8u, ext16u.  */
    switch (len) {
    case 1 ... 8: case 16:
        tcg_gen_shri_i32(ret, arg, ofs);
        tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
        break;
    default:
        tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
        tcg_gen_shri_i32(ret, ret, 32 - len);
        break;
    }
}

void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported.  */
    if (ofs + len == 32) {
        tcg_gen_sari_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_gen_ext16s_i32(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i32(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_HAS_sextract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift.  */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_ext16s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_ext8s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext16s_i32(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext8s_i32(ret, ret);
            return;
        }
        break;
    }

    tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
    tcg_gen_sari_i32(ret, ret, 32 - len);
}

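/* For hosts without movcond, the fallback turns the condition into an
   all-ones/all-zeros mask m = -setcond(cond, c1, c2) and selects with
   bit operations: ret = (v1 & m) | (v2 & ~m).  */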
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
                         TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i32(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i32(ret, v2);
    } else if (TCG_TARGET_HAS_movcond_i32) {
        tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cond, t0, c1, c2);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_and_i32(t1, v1, t0);
        tcg_gen_andc_i32(ret, v2, t0);
        tcg_gen_or_i32(ret, ret, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_add_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_sub2_i32) {
        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_sub_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

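/* For the generic expansions below, the signed high part is derived from
   the unsigned product: treating a negative A as A - 2^32 gives
       mulsh(a, b) = muluh(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
   which is what the sign-mask adjustments compute.  */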
void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative inputs.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_sari_i32(t3, arg2, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_and_i32(t3, t3, arg1);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_sub_i32(rh, rh, t3);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_ext_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative input for the signed arg1.  */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 24);
        tcg_gen_sari_i32(ret, ret, 24);
    }
}

void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 16);
        tcg_gen_sari_i32(ret, ret, 16);
    }
}

void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffffu);
    }
}

/* Note: we assume the two high bytes are set to zero */
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap16_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();

        tcg_gen_ext8u_i32(t0, arg);
        tcg_gen_shli_i32(t0, t0, 8);
        tcg_gen_shri_i32(ret, arg, 8);
        tcg_gen_or_i32(ret, ret, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap32_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_shli_i32(t0, arg, 24);

        tcg_gen_andi_i32(t1, arg, 0x0000ff00);
        tcg_gen_shli_i32(t1, t1, 8);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 8);
        tcg_gen_andi_i32(t1, t1, 0x0000ff00);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 24);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

/* 64-bit ops */

#if TCG_TARGET_REG_BITS == 32
/* These are all inline for TCG_TARGET_REG_BITS == 64.  */

void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}

void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
}

void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}

void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    /* Since arg2 and ret have different types,
       they cannot be the same temporary */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}

void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shl_i64(ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shr_i64(ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_sar_i64(ret, arg1, arg2);
}

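/* A 64-bit multiply on a 32-bit host: mulu2 produces the full product of
   the low words, and the cross terms al*bh and ah*bl are added into the
   high word.  The ah*bh term only affects bits 64 and above, so it is
   dropped.  */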
void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
                      TCGV_LOW(arg1), TCGV_LOW(arg2));

    tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
    tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);

    tcg_gen_mov_i64(ret, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
}
#endif /* TCG_TARGET_REG_BITS == 32 */

void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_add_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
        /* Don't recurse with tcg_gen_neg_i64.  */
        tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg1);
        tcg_gen_sub_i64(ret, t0, arg2);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    TCGv_i64 t0;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }

    /* Some cases can be optimized here.  */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i64(ret, 0);
        return;
    case 0xffffffffffffffffull:
        tcg_gen_mov_i64(ret, arg1);
        return;
    case 0xffull:
        /* Don't recurse with tcg_gen_ext8u_i64.  */
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffffffull:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i64(arg2);
    tcg_gen_and_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here.  */
    if (arg2 == -1) {
        tcg_gen_movi_i64(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here.  */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
        /* Don't recurse with tcg_gen_not_i64.  */
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_xor_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

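/* Constant double-word shifts for 32-bit hosts.  A count c >= 32 moves
   one word into the other (with sign or zero fill); for 0 < c < 32 each
   result word combines bits from both source words, e.g. for a left shift:
       hi = (hi << c) | (lo >> (32 - c));  lo <<= c;  */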
static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
                                      unsigned c, bool right, bool arith)
{
    tcg_debug_assert(c < 64);
    if (c == 0) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(TCGV_LOW(ret), 0);
        }
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        if (right) {
            tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
            if (arith) {
                tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
            } else {
                tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
            }
            tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shr_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sar_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
                              TCGV_HIGH(arg1), TCGV_LOW(arg2),
                              TCGV_HIGH(arg2), cond, label_arg(l));
        } else {
            tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
                              label_arg(l));
        }
    }
}

void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_brcond_i64(cond, arg1, t0, l);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
                         TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i64(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
                             TCGV_LOW(arg1), TCGV_HIGH(arg1),
                             TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        } else {
            tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
        }
    }
}

void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
                          TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_setcond_i64(cond, ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_mul_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_div_i64(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_rem_i64(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_divu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_remu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext8s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 56);
        tcg_gen_sari_i64(ret, ret, 56);
    }
}

void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext16s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 48);
        tcg_gen_sari_i64(ret, ret, 48);
    }
}

void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext32s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 32);
        tcg_gen_sari_i64(ret, ret, 32);
    }
}

void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext8u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext16u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffu);
    }
}

void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext32u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffffffu);
    }
}

/* Note: we assume the six high bytes are set to zero */
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap16_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();

        tcg_gen_ext8u_i64(t0, arg);
        tcg_gen_shli_i64(t0, t0, 8);
        tcg_gen_shri_i64(ret, arg, 8);
        tcg_gen_or_i64(ret, ret, t0);
        tcg_temp_free_i64(t0);
    }
}

/* Note: we assume the four high bytes are set to zero */
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap32_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();

        tcg_gen_shli_i64(t0, arg, 24);
        tcg_gen_ext32u_i64(t0, t0);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
        tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
        tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_bswap64_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shli_i64(t0, arg, 56);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 40);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_andi_i64(t1, arg, 0x00ff0000);
        tcg_gen_shli_i64(t1, t1, 24);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_andi_i64(t1, arg, 0xff000000);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0xff000000);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_andi_i64(t1, t1, 0x00ff0000);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 40);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 56);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_not_i64) {
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
    } else {
        tcg_gen_xori_i64(ret, arg, -1);
    }
}

void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_andc_i64) {
        tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_and_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_eqv_i64) {
        tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nand_i64) {
        tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
    } else {
        tcg_gen_and_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_nor_i64) {
        tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
    } else {
        tcg_gen_or_i64(ret, arg1, arg2);
        tcg_gen_not_i64(ret, ret);
    }
}

void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
    } else if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rot_i64) {
        tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shl_i64(t0, arg1, arg2);
        tcg_gen_subfi_i64(t1, 64, arg2);
        tcg_gen_shr_i64(t1, arg1, t1);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i64) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_rotl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shli_i64(t0, arg1, arg2);
        tcg_gen_shri_i64(t1, arg1, 64 - arg2);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rot_i64) {
        tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();
        tcg_gen_shr_i64(t0, arg1, arg2);
        tcg_gen_subfi_i64(t1, 64, arg2);
        tcg_gen_shl_i64(t1, arg1, t1);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
    }
}

1773void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
1774 unsigned int ofs, unsigned int len)
1775{
1776 uint64_t mask;
1777 TCGv_i64 t1;
1778
1779 tcg_debug_assert(ofs < 64);
0d0d309d 1780 tcg_debug_assert(len > 0);
951c6300
RH
1781 tcg_debug_assert(len <= 64);
1782 tcg_debug_assert(ofs + len <= 64);
1783
0d0d309d 1784 if (len == 64) {
951c6300
RH
1785 tcg_gen_mov_i64(ret, arg2);
1786 return;
1787 }
1788 if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
1789 tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
1790 return;
1791 }
1792
3a13c3f3
RH
1793 if (TCG_TARGET_REG_BITS == 32) {
1794 if (ofs >= 32) {
1795 tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
1796 TCGV_LOW(arg2), ofs - 32, len);
1797 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
1798 return;
1799 }
1800 if (ofs + len <= 32) {
1801 tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
1802 TCGV_LOW(arg2), ofs, len);
1803 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
1804 return;
1805 }
951c6300 1806 }
951c6300
RH
1807
1808 mask = (1ull << len) - 1;
1809 t1 = tcg_temp_new_i64();
1810
1811 if (ofs + len < 64) {
1812 tcg_gen_andi_i64(t1, arg2, mask);
1813 tcg_gen_shli_i64(t1, t1, ofs);
1814 } else {
1815 tcg_gen_shli_i64(t1, arg2, ofs);
1816 }
1817 tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
1818 tcg_gen_or_i64(ret, ret, t1);
1819
1820 tcg_temp_free_i64(t1);
1821}

void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
                           unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    if (ofs + len == 64) {
        tcg_gen_shli_i64(ret, arg, ofs);
    } else if (ofs == 0) {
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
    } else if (TCG_TARGET_HAS_deposit_i64
               && TCG_TARGET_deposit_i64_valid(ofs, len)) {
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
        tcg_temp_free_i64(zero);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            if (ofs >= 32) {
                tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg),
                                      ofs - 32, len);
                tcg_gen_movi_i32(TCGV_LOW(ret), 0);
                return;
            }
            if (ofs + len <= 32) {
                tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
                return;
            }
        }
        /* To help two-operand hosts we prefer to zero-extend first,
           which allows ARG to stay live. */
        switch (len) {
        case 32:
            if (TCG_TARGET_HAS_ext32u_i64) {
                tcg_gen_ext32u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        case 16:
            if (TCG_TARGET_HAS_ext16u_i64) {
                tcg_gen_ext16u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i64) {
                tcg_gen_ext8u_i64(ret, arg);
                tcg_gen_shli_i64(ret, ret, ofs);
                return;
            }
            break;
        }
        /* Otherwise prefer zero-extension over AND for code size. */
        switch (ofs + len) {
        case 32:
            if (TCG_TARGET_HAS_ext32u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext32u_i64(ret, ret);
                return;
            }
            break;
        case 16:
            if (TCG_TARGET_HAS_ext16u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext16u_i64(ret, ret);
                return;
            }
            break;
        case 8:
            if (TCG_TARGET_HAS_ext8u_i64) {
                tcg_gen_shli_i64(ret, arg, ofs);
                tcg_gen_ext8u_i64(ret, ret);
                return;
            }
            break;
        }
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
        tcg_gen_shli_i64(ret, ret, ofs);
    }
}
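
/* For illustration: deposit_z is a deposit into an all-zero background,
   so every path above is equivalent (for len < 64) to
       ret = (arg & ((1ull << len) - 1)) << ofs;
   the len == 64 case is already covered by the ofs + len == 64 branch,
   since the assertions force ofs == 0 there.  */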

void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    /* Canonicalize certain special cases, even if extract is supported. */
    if (ofs + len == 64) {
        tcg_gen_shri_i64(ret, arg, 64 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* Look for a 32-bit extract within one of the two words. */
        if (ofs >= 32) {
            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            return;
        }
        if (ofs + len <= 32) {
            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            return;
        }
        /* The field is split across two words.  One double-word
           shift is better than two double-word shifts. */
        goto do_shift_and;
    }

    if (TCG_TARGET_HAS_extract_i64
        && TCG_TARGET_extract_i64_valid(ofs, len)) {
        tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift. */
    switch (ofs + len) {
    case 32:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_ext32u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_ext16u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_ext8u_i64(ret, arg);
            tcg_gen_shri_i64(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special cases of 16 and 32,
       so that we get ext8u, ext16u, and ext32u. */
    switch (len) {
    case 1 ... 8: case 16: case 32:
    do_shift_and:
        tcg_gen_shri_i64(ret, arg, ofs);
        tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
        break;
    default:
        tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
        tcg_gen_shri_i64(ret, ret, 64 - len);
        break;
    }
}
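
/* For illustration, every extract path above implements (for len < 64)
       ret = (arg >> ofs) & ((1ull << len) - 1);
   the final default case computes the same value as a shift pair,
       (arg << (64 - len - ofs)) >> (64 - len)
   to avoid an AND mask that may not encode as an immediate.  */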

void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 64);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 64);
    tcg_debug_assert(ofs + len <= 64);

    /* Canonicalize certain special cases, even if sextract is supported. */
    if (ofs + len == 64) {
        tcg_gen_sari_i64(ret, arg, 64 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 32:
            tcg_gen_ext32s_i64(ret, arg);
            return;
        case 16:
            tcg_gen_ext16s_i64(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i64(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* Look for a 32-bit extract within one of the two words. */
        if (ofs >= 32) {
            tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
        } else if (ofs + len <= 32) {
            tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
        } else if (ofs == 0) {
            tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
            tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
            return;
        } else if (len > 32) {
            TCGv_i32 t = tcg_temp_new_i32();
            /* Extract the bits for the high word normally.  The high part
               of the field begins at bit OFS of the high word, so the
               offset passed to the i32 op must stay below 32.  */
            tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs, len - 32);
            /* Shift the field down for the low part. */
            tcg_gen_shri_i64(ret, arg, ofs);
            /* Overwrite the shift into the high part. */
            tcg_gen_mov_i32(TCGV_HIGH(ret), t);
            tcg_temp_free_i32(t);
            return;
        } else {
            /* Shift the field down for the low part, such that the
               field sits at the MSB. */
            tcg_gen_shri_i64(ret, arg, ofs + len - 32);
            /* Shift the field down from the MSB, sign extending. */
            tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len);
        }
        /* Sign-extend the field from 32 bits. */
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
        return;
    }

    if (TCG_TARGET_HAS_sextract_i64
        && TCG_TARGET_extract_i64_valid(ofs, len)) {
        tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift. */
    switch (ofs + len) {
    case 32:
        if (TCG_TARGET_HAS_ext32s_i64) {
            tcg_gen_ext32s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16s_i64) {
            tcg_gen_ext16s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i64) {
            tcg_gen_ext8s_i64(ret, arg);
            tcg_gen_sari_i64(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 32:
        if (TCG_TARGET_HAS_ext32s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext32s_i64(ret, ret);
            return;
        }
        break;
    case 16:
        if (TCG_TARGET_HAS_ext16s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext16s_i64(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i64) {
            tcg_gen_shri_i64(ret, arg, ofs);
            tcg_gen_ext8s_i64(ret, ret);
            return;
        }
        break;
    }
    tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
    tcg_gen_sari_i64(ret, ret, 64 - len);
}
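
/* For illustration: sextract returns the field sign-extended, i.e. the
   generic tail above computes
       ret = (int64_t)(arg << (64 - len - ofs)) >> (64 - len);
   using an arithmetic right shift so that bit ofs + len - 1 of ARG
   fills all the upper bits of RET.  */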

void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
                         TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i64(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i64(ret, v2);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
                         TCGV_LOW(c1), TCGV_HIGH(c1),
                         TCGV_LOW(c2), TCGV_HIGH(c2), cond);

        if (TCG_TARGET_HAS_movcond_i32) {
            tcg_gen_movi_i32(t1, 0);
            tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
                                TCGV_LOW(v1), TCGV_LOW(v2));
            tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
                                TCGV_HIGH(v1), TCGV_HIGH(v2));
        } else {
            tcg_gen_neg_i32(t0, t0);

            tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
            tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);

            tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
            tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else if (TCG_TARGET_HAS_movcond_i64) {
        tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond, t0, c1, c2);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_and_i64(t1, v1, t0);
        tcg_gen_andc_i64(ret, v2, t0);
        tcg_gen_or_i64(ret, ret, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
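
/* For illustration: the no-movcond fallback materializes the condition
   as 0/1 with setcond and negates it into an all-zeroes/all-ones mask,
   then selects with plain bit operations:
       t0  = -(c1 cond c2);
       ret = (v1 & t0) | (v2 & ~t0);
   the usual branchless select idiom.  */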

void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    if (TCG_TARGET_HAS_add2_i64) {
        tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_add_i64(t0, al, bl);
        tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
        tcg_gen_add_i64(rh, ah, bh);
        tcg_gen_add_i64(rh, rh, t1);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
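
/* For illustration: the add2 fallback recovers the carry out of the low
   half with an unsigned compare, since for t0 = al + bl (mod 2^64)
       carry = (t0 < al)
   holds exactly when the addition wrapped.  */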

void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    if (TCG_TARGET_HAS_sub2_i64) {
        tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_sub_i64(t0, al, bl);
        tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
        tcg_gen_sub_i64(rh, ah, bh);
        tcg_gen_sub_i64(rh, rh, t1);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
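
/* Likewise for sub2: the borrow out of the low half is
       borrow = (al < bl)
   computed from the original operands, so it does not matter that T0
   already holds the low-half difference.  */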

void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i64) {
        tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
        tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mul_i64(t0, arg1, arg2);
        gen_helper_muluh_i64(rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_muls2_i64) {
        tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i64) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
        tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
        /* Adjust for negative inputs. */
        tcg_gen_sari_i64(t2, arg1, 63);
        tcg_gen_sari_i64(t3, arg2, 63);
        tcg_gen_and_i64(t2, t2, arg2);
        tcg_gen_and_i64(t3, t3, arg1);
        tcg_gen_sub_i64(rh, t1, t2);
        tcg_gen_sub_i64(rh, rh, t3);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mul_i64(t0, arg1, arg2);
        gen_helper_mulsh_i64(rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t0);
        tcg_temp_free_i64(t0);
    }
}
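
/* For illustration, the mulu2-based path relies on the identity that the
   low 64 bits of signed and unsigned products coincide, while the signed
   high half is, mod 2^64,
       shigh = uhigh - (arg1 < 0 ? arg2 : 0) - (arg2 < 0 ? arg1 : 0)
   which is exactly what the sari/and masks and the two subtractions
   above compute.  */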

void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
    /* Adjust for negative input for the signed arg1. */
    tcg_gen_sari_i64(t2, arg1, 63);
    tcg_gen_and_i64(t2, t2, arg2);
    tcg_gen_sub_i64(rh, t1, t2);
    tcg_gen_mov_i64(rl, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
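
/* For illustration: with only ARG1 signed, a single correction suffices:
       high((s64)a * (u64)b) = high((u64)a * b) - (a < 0 ? b : 0)
   (mod 2^64), hence the one sari/and/sub sequence above.  */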

/* Size changing operations. */

void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(ret, TCGV_LOW(arg));
    } else if (TCG_TARGET_HAS_extrl_i64_i32) {
        tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32,
                    GET_TCGV_I32(ret), GET_TCGV_I64(arg));
    } else {
        tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
    }
}

void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
    } else if (TCG_TARGET_HAS_extrh_i64_i32) {
        tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32,
                    GET_TCGV_I32(ret), GET_TCGV_I64(arg));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_shri_i64(t, arg, 32);
        tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
        tcg_temp_free_i64(t);
    }
}
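
/* For illustration: extrl/extrh return the low and high 32-bit halves,
       extrl(x) == (uint32_t)x        extrh(x) == (uint32_t)(x >> 32)
   On a 32-bit host these are plain register moves; on a 64-bit host
   without the dedicated opcodes, the i64 register is reused as an i32
   (the MAKE_TCGV_I32/GET_TCGV_I64 trick), after a shift for the high
   half.  */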

void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), arg);
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else {
        tcg_gen_op2(&tcg_ctx, INDEX_op_extu_i32_i64,
                    GET_TCGV_I64(ret), GET_TCGV_I32(arg));
    }
}

void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), arg);
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else {
        tcg_gen_op2(&tcg_ctx, INDEX_op_ext_i32_i64,
                    GET_TCGV_I64(ret), GET_TCGV_I32(arg));
    }
}
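
/* For illustration: extu_i32_i64 zero-extends and ext_i32_i64
   sign-extends, matching the C conversions
       (uint64_t)(uint32_t)x    and    (int64_t)(int32_t)x
   respectively; on a 32-bit host the high word is just 0 or a copy of
   the sign bit, and only 64-bit hosts need a real opcode.  */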

void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
{
    TCGv_i64 tmp;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(dest), low);
        tcg_gen_mov_i32(TCGV_HIGH(dest), high);
        return;
    }

    tmp = tcg_temp_new_i64();
    /* These extensions are only needed for type correctness.
       We may be able to do better given target specific information. */
    tcg_gen_extu_i32_i64(tmp, high);
    tcg_gen_extu_i32_i64(dest, low);
    /* If deposit is available, use it.  Otherwise use the extra
       knowledge that we have of the zero-extensions above. */
    if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
        tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
    } else {
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_or_i64(dest, dest, tmp);
    }
    tcg_temp_free_i64(tmp);
}
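
/* For illustration, concat_i32_i64 computes
       dest = ((uint64_t)high << 32) | (uint32_t)low;
   the deposit form and the explicit shli/or form are equivalent here
   precisely because both inputs have just been zero-extended.  */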

void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(lo, TCGV_LOW(arg));
        tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
    } else {
        tcg_gen_extrl_i64_i32(lo, arg);
        tcg_gen_extrh_i64_i32(hi, arg);
    }
}

void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
{
    tcg_gen_ext32u_i64(lo, arg);
    tcg_gen_shri_i64(hi, arg, 32);
}

/* QEMU specific operations. */

void tcg_gen_goto_tb(unsigned idx)
{
    /* We only support two chained exits. */
    tcg_debug_assert(idx <= 1);
#ifdef CONFIG_DEBUG_TCG
    /* Verify that we haven't seen this numbered exit before. */
    tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
    tcg_ctx.goto_tb_issue_mask |= 1 << idx;
#endif
    tcg_gen_op1i(INDEX_op_goto_tb, idx);
}

static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
    /* Trigger the asserts within as early as possible. */
    (void)get_alignment_bits(op);

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (!is64) {
            tcg_abort();
        }
        break;
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}
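
/* For illustration: canonicalization strips flags that cannot matter,
   e.g. a byte access has no endianness, a 32-bit value destined for an
   i32 has nothing to sign-extend into, and no store is signed.  So a
   hypothetical call
       tcg_canonicalize_memop(MO_TESW, 0, 1)
   would drop MO_SIGN, leaving only the size and byte-swap bits.  */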

static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
                         TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    tcg_gen_op3i_i32(opc, val, addr, oi);
#else
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
    } else {
        tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi);
    }
#endif
}

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
                         TCGMemOp memop, TCGArg idx)
{
    TCGMemOpIdx oi = make_memop_idx(memop, idx);
#if TARGET_LONG_BITS == 32
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
    } else {
        tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi);
    }
#else
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
                         TCGV_LOW(addr), TCGV_HIGH(addr), oi);
    } else {
        tcg_gen_op3i_i64(opc, val, addr, oi);
    }
#endif
}
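
/* For illustration: between them, gen_ldst_i32/_i64 cover the four
   combinations of 32- vs 64-bit guest addresses (TARGET_LONG_BITS) and
   32- vs 64-bit host registers (TCG_TARGET_REG_BITS).  A value or
   address wider than a host register is passed as a LOW/HIGH pair, so
   the emitted opcode carries from three to five operands counting OI.  */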

void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 0, 0);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 0));
    gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
}

void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 0, 1);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 1));
    gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
}

void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    memop = tcg_canonicalize_memop(memop, 1, 0);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 0));
    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
}
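
/* For illustration: on a 32-bit host a load narrower than 64 bits only
   fills TCGV_LOW, so the high half is completed separately, with an
   arithmetic shift of the low word for MO_SIGN and a zero otherwise;
   only genuine 64-bit accesses reach the qemu_ld_i64 opcode.  */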

void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 1, 1);
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 1));
    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
}

static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}

static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}
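
/* For illustration: these helpers re-extend a value according to a
   memop, e.g. tcg_gen_ext_i64(ret, val, MO_SB) behaves like ext8s
   while MO_UB selects ext8u; any size at or above the register width
   falls through to a plain move.  */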

#ifdef CONFIG_SOFTMMU
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
                                  TCGv_i64, TCGv_i32);
#else
typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
#endif

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif

static void * const table_cmpxchg[16] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
};

void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 0, 0);

    if (!parallel_cpus) {
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();

        tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i32(t2, addr, idx, memop);
        tcg_temp_free_i32(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, t1, memop);
        } else {
            tcg_gen_mov_i32(retv, t1);
        }
        tcg_temp_free_i32(t1);
    } else {
        gen_atomic_cx_i32 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
            gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
#endif

        if (memop & MO_SIGN) {
            tcg_gen_ext_i32(retv, retv, memop);
        }
    }
}
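
/* For illustration: the serial (!parallel_cpus) path above is a plain
   read-modify-write,
       t1 = *addr;
       *addr = (t1 == cmpv ? newv : t1);
       return t1;
   expressed branch-free with movcond.  The store happens even on a
   failed compare, which is safe because nothing else can run between
   the load and the store in this mode.  */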

void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
                                TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if (!parallel_cpus) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

        tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
        tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i64(t2, addr, idx, memop);
        tcg_temp_free_i64(t2);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, t1, memop);
        } else {
            tcg_gen_mov_i64(retv, t1);
        }
        tcg_temp_free_i64(t1);
    } else if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_cx_i64 gen;

        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
            gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
#endif
#else
        gen_helper_exit_atomic(tcg_ctx.tcg_env);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 c32 = tcg_temp_new_i32();
        TCGv_i32 n32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                                TCGArg idx, TCGMemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
    gen(t2, t1, val);
    tcg_gen_qemu_st_i32(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                             TCGArg idx, TCGMemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
    {
        TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
        gen(ret, tcg_ctx.tcg_env, addr, val, oi);
        tcg_temp_free_i32(oi);
    }
#else
    gen(ret, tcg_ctx.tcg_env, addr, val);
#endif

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                                TCGArg idx, TCGMemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
    gen(t2, t1, val);
    tcg_gen_qemu_st_i64(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
                             TCGArg idx, TCGMemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_op_i64 gen;

        gen = table[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);

#ifdef CONFIG_SOFTMMU
        {
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
            gen(ret, tcg_ctx.tcg_env, addr, val, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(ret, tcg_ctx.tcg_env, addr, val);
#endif
#else
        gen_helper_exit_atomic(tcg_ctx.tcg_env);
#endif /* CONFIG_ATOMIC64 */
    } else {
        TCGv_i32 v32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[16] = {                                \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32                                        \
    (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (parallel_cpus) {                                                \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64                                        \
    (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{                                                                       \
    if (parallel_cpus) {                                                \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}
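
/* For illustration, GEN_ATOMIC_HELPER(fetch_add, add, 0) expands to a
   helper table plus tcg_gen_atomic_fetch_add_i32/_i64.  The NEW flag
   selects which value the nonatomic path returns: NEW == 0 yields the
   value loaded before the operation (fetch-and-op), NEW == 1 the value
   stored after it (op-and-fetch).  */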

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)

static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER
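
/* For illustration: xchg reuses the same machinery with mov2, whose
   "operation" simply produces the new value B; combined with NEW == 0
   the nonatomic path stores VAL and returns the previously loaded
   value, which is exactly an exchange.  */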