/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
#include "trace-tcg.h"
#include "trace/mem.h"

/* Reduce the number of ifdefs below. This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate. */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

/* Note that this is optimized for sequential allocation during translate.
   Up to and including filling in the forward link immediately. We'll do
   proper termination of the end of the list after we finish translation. */

static void tcg_emit_op(TCGContext *ctx, TCGOpcode opc, int args)
{
    int oi = ctx->gen_next_op_idx;
    int ni = oi + 1;
    int pi = oi - 1;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    ctx->gen_op_buf[0].prev = oi;
    ctx->gen_next_op_idx = ni;

    ctx->gen_op_buf[oi] = (TCGOp){
        .opc = opc,
        .args = args,
        .prev = pi,
        .next = ni
    };
}

void tcg_gen_op1(TCGContext *ctx, TCGOpcode opc, TCGArg a1)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 1 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 1;
    ctx->gen_opparam_buf[pi] = a1;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op2(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 2 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 2;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op3(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 3 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 3;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op4(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3, TCGArg a4)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 4 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 4;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op5(TCGContext *ctx, TCGOpcode opc, TCGArg a1,
                 TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 5 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 5;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;
    ctx->gen_opparam_buf[pi + 4] = a5;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
                 TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
{
    int pi = ctx->gen_next_parm_idx;

    tcg_debug_assert(pi + 6 <= OPPARAM_BUF_SIZE);
    ctx->gen_next_parm_idx = pi + 6;
    ctx->gen_opparam_buf[pi + 0] = a1;
    ctx->gen_opparam_buf[pi + 1] = a2;
    ctx->gen_opparam_buf[pi + 2] = a3;
    ctx->gen_opparam_buf[pi + 3] = a4;
    ctx->gen_opparam_buf[pi + 4] = a5;
    ctx->gen_opparam_buf[pi + 5] = a6;

    tcg_emit_op(ctx, opc, pi);
}

void tcg_gen_mb(TCGBar mb_type)
{
    if (parallel_cpus) {
        tcg_gen_op1(&tcg_ctx, INDEX_op_mb, mb_type);
    }
}

/* 32 bit ops */

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_add_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) {
        /* Don't recurse with tcg_gen_neg_i32. */
        tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg1);
        tcg_gen_sub_i32(ret, t0, arg2);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
    TCGv_i32 t0;
    /* Some cases can be optimized here. */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i32(ret, 0);
        return;
    case 0xffffffffu:
        tcg_gen_mov_i32(ret, arg1);
        return;
    case 0xffu:
        /* Don't recurse with tcg_gen_ext8u_i32. */
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i32(arg2);
    tcg_gen_and_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    if (arg2 == -1) {
        tcg_gen_movi_i32(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    /* Some cases can be optimized here. */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
        /* Don't recurse with tcg_gen_not_i32. */
        tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_xor_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_shr_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_sar_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l));
    }
}

void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_brcond_i32(cond, arg1, t0, l);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
                         TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i32(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i32(ret, 0);
    } else {
        tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
    }
}

void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
                          TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_setcond_i32(cond, ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
    TCGv_i32 t0 = tcg_const_i32(arg2);
    tcg_gen_mul_i32(ret, arg1, t0);
    tcg_temp_free_i32(t0);
}

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_div_i32(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
        tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_rem_i32(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_div_i32) {
        tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_divu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_movi_i32(t0, 0);
        tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i32(t0);
    } else {
        gen_helper_remu_i32(ret, arg1, arg2);
    }
}

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_andc_i32) {
        tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_and_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_eqv_i32) {
        tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
    } else {
        tcg_gen_xor_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nand_i32) {
        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
    } else {
        tcg_gen_and_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_nor_i32) {
        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(ret, arg1, arg2);
        tcg_gen_not_i32(ret, ret);
    }
}

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_orc_i32) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);
        tcg_gen_or_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shl_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shr_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else if (TCG_TARGET_HAS_rot_i32) {
        TCGv_i32 t0 = tcg_const_i32(arg2);
        tcg_gen_rotl_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shli_i32(t0, arg1, arg2);
        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rot_i32) {
        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_shr_i32(t0, arg1, arg2);
        tcg_gen_subfi_i32(t1, 32, arg2);
        tcg_gen_shl_i32(t1, arg1, t1);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 32);
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i32(ret, arg1);
    } else {
        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
    }
}

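/* The generic deposit fallback below masks arg2 to len bits, shifts it
   into position, clears the target field in arg1, and ORs the two
   together. Masking arg2 can be skipped when the field reaches the top
   of the word, since the left shift discards the high bits anyway. */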
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
                         unsigned int ofs, unsigned int len)
{
    uint32_t mask;
    TCGv_i32 t1;

    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    if (len == 32) {
        tcg_gen_mov_i32(ret, arg2);
        return;
    }
    if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
        tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
        return;
    }

    mask = (1u << len) - 1;
    t1 = tcg_temp_new_i32();

    if (ofs + len < 32) {
        tcg_gen_andi_i32(t1, arg2, mask);
        tcg_gen_shli_i32(t1, t1, ofs);
    } else {
        tcg_gen_shli_i32(t1, arg2, ofs);
    }
    tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
    tcg_gen_or_i32(ret, ret, t1);

    tcg_temp_free_i32(t1);
}

void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
                         unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported. */
    if (ofs + len == 32) {
        tcg_gen_shri_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
        return;
    }

    if (TCG_TARGET_HAS_extract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that zero-extension, if available, is cheaper than a shift. */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16u_i32) {
            tcg_gen_ext16u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8u_i32) {
            tcg_gen_ext8u_i32(ret, arg);
            tcg_gen_shri_i32(ret, ret, ofs);
            return;
        }
        break;
    }

    /* ??? Ideally we'd know what values are available for immediate AND.
       Assume that 8 bits are available, plus the special case of 16,
       so that we get ext8u, ext16u. */
    switch (len) {
    case 1 ... 8: case 16:
        tcg_gen_shri_i32(ret, arg, ofs);
        tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
        break;
    default:
        tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
        tcg_gen_shri_i32(ret, ret, 32 - len);
        break;
    }
}

void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
                          unsigned int ofs, unsigned int len)
{
    tcg_debug_assert(ofs < 32);
    tcg_debug_assert(len > 0);
    tcg_debug_assert(len <= 32);
    tcg_debug_assert(ofs + len <= 32);

    /* Canonicalize certain special cases, even if extract is supported. */
    if (ofs + len == 32) {
        tcg_gen_sari_i32(ret, arg, 32 - len);
        return;
    }
    if (ofs == 0) {
        switch (len) {
        case 16:
            tcg_gen_ext16s_i32(ret, arg);
            return;
        case 8:
            tcg_gen_ext8s_i32(ret, arg);
            return;
        }
    }

    if (TCG_TARGET_HAS_sextract_i32
        && TCG_TARGET_extract_i32_valid(ofs, len)) {
        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
        return;
    }

    /* Assume that sign-extension, if available, is cheaper than a shift. */
    switch (ofs + len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_ext16s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_ext8s_i32(ret, arg);
            tcg_gen_sari_i32(ret, ret, ofs);
            return;
        }
        break;
    }
    switch (len) {
    case 16:
        if (TCG_TARGET_HAS_ext16s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext16s_i32(ret, ret);
            return;
        }
        break;
    case 8:
        if (TCG_TARGET_HAS_ext8s_i32) {
            tcg_gen_shri_i32(ret, arg, ofs);
            tcg_gen_ext8s_i32(ret, ret);
            return;
        }
        break;
    }

    tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
    tcg_gen_sari_i32(ret, ret, 32 - len);
}

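/* The generic fallback below expands movcond arithmetically: the 0/1
   setcond result is negated into an all-zeros or all-ones mask, and
   ret = (v1 & mask) | (v2 & ~mask) selects between the two values. */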
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
                         TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i32(ret, v1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_mov_i32(ret, v2);
    } else if (TCG_TARGET_HAS_movcond_i32) {
        tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cond, t0, c1, c2);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_and_i32(t1, v1, t0);
        tcg_gen_andc_i32(ret, v2, t0);
        tcg_gen_or_i32(ret, ret, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_add_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
                      TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
{
    if (TCG_TARGET_HAS_sub2_i32) {
        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_concat_i32_i64(t0, al, ah);
        tcg_gen_concat_i32_i64(t1, bl, bh);
        tcg_gen_sub_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_muluh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
    } else if (TCG_TARGET_HAS_mulsh_i32) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative inputs. */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_sari_i32(t3, arg2, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_and_i32(t3, t3, arg1);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_sub_i32(rh, rh, t3);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_ext_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

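/* Signed-by-unsigned multiply: reuse the unsigned product and correct
   the high half. Reading negative arg1 as arg1 + 2^32, we have
   (arg1 + 2^32) * arg2 = arg1 * arg2 + 2^32 * arg2, so when arg1 < 0
   the signed high half is the unsigned high half minus arg2. */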
void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();
        tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
        /* Adjust for negative input for the signed arg1. */
        tcg_gen_sari_i32(t2, arg1, 31);
        tcg_gen_and_i32(t2, t2, arg2);
        tcg_gen_sub_i32(rh, t1, t2);
        tcg_gen_mov_i32(rl, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(t2);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t0, arg1);
        tcg_gen_extu_i32_i64(t1, arg2);
        tcg_gen_mul_i64(t0, t0, t1);
        tcg_gen_extr_i64_i32(rl, rh, t0);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 24);
        tcg_gen_sari_i32(ret, ret, 24);
    }
}

void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16s_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
    } else {
        tcg_gen_shli_i32(ret, arg, 16);
        tcg_gen_sari_i32(ret, ret, 16);
    }
}

void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext8u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_ext16u_i32) {
        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
    } else {
        tcg_gen_andi_i32(ret, arg, 0xffffu);
    }
}

/* Note: we assume the two high bytes are set to zero */
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap16_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();

        tcg_gen_ext8u_i32(t0, arg);
        tcg_gen_shli_i32(t0, t0, 8);
        tcg_gen_shri_i32(ret, arg, 8);
        tcg_gen_or_i32(ret, ret, t0);
        tcg_temp_free_i32(t0);
    }
}

void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    if (TCG_TARGET_HAS_bswap32_i32) {
        tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
    } else {
        TCGv_i32 t0, t1;
        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();

        tcg_gen_shli_i32(t0, arg, 24);

        tcg_gen_andi_i32(t1, arg, 0x0000ff00);
        tcg_gen_shli_i32(t1, t1, 8);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 8);
        tcg_gen_andi_i32(t1, t1, 0x0000ff00);
        tcg_gen_or_i32(t0, t0, t1);

        tcg_gen_shri_i32(t1, arg, 24);
        tcg_gen_or_i32(ret, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

/* 64-bit ops */

#if TCG_TARGET_REG_BITS == 32
/* These are all inline for TCG_TARGET_REG_BITS == 64. */

void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}

void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
}

void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}

void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
{
    /* Since arg2 and ret have different types,
       they cannot be the same temporary */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}

void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shl_i64(ret, arg1, arg2);
}

void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_shr_i64(ret, arg1, arg2);
}

void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    gen_helper_sar_i64(ret, arg1, arg2);
}

void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0),
                      TCGV_LOW(arg1), TCGV_LOW(arg2));

    tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
    tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);

    tcg_gen_mov_i64(ret, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
}
#endif /* TCG_TARGET_REG_BITS == 32 */

void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_add_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
{
    if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
        /* Don't recurse with tcg_gen_neg_i64. */
        tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg1);
        tcg_gen_sub_i64(ret, t0, arg2);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    /* some cases can be optimized here */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
    TCGv_i64 t0;

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }

    /* Some cases can be optimized here. */
    switch (arg2) {
    case 0:
        tcg_gen_movi_i64(ret, 0);
        return;
    case 0xffffffffffffffffull:
        tcg_gen_mov_i64(ret, arg1);
        return;
    case 0xffull:
        /* Don't recurse with tcg_gen_ext8u_i64. */
        if (TCG_TARGET_HAS_ext8u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffu:
        if (TCG_TARGET_HAS_ext16u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
            return;
        }
        break;
    case 0xffffffffull:
        if (TCG_TARGET_HAS_ext32u_i64) {
            tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
            return;
        }
        break;
    }
    t0 = tcg_const_i64(arg2);
    tcg_gen_and_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here. */
    if (arg2 == -1) {
        tcg_gen_movi_i64(ret, -1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_or_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
        tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
        return;
    }
    /* Some cases can be optimized here. */
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
        /* Don't recurse with tcg_gen_not_i64. */
        tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_xor_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

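/* Expand a 64-bit shift by constant as 32-bit operations on the two
   halves: a count of 0 is a move, counts of 32 or more move bits
   entirely across the word boundary, and smaller counts shift each
   half and OR in the bits carried across from the other half. */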
static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
                                      unsigned c, bool right, bool arith)
{
    tcg_debug_assert(c < 64);
    if (c == 0) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(TCGV_LOW(ret), 0);
        }
    } else {
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        if (right) {
            tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
            if (arith) {
                tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
            } else {
                tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
            }
            tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}

void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shr_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
{
    tcg_debug_assert(arg2 < 64);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
    } else if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_sar_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
                              TCGV_HIGH(arg1), TCGV_LOW(arg2),
                              TCGV_HIGH(arg2), cond, label_arg(l));
        } else {
            tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
                              label_arg(l));
        }
    }
}

void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_br(l);
    } else if (cond != TCG_COND_NEVER) {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_brcond_i64(cond, arg1, t0, l);
        tcg_temp_free_i64(t0);
    }
}

void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
                         TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (cond == TCG_COND_ALWAYS) {
        tcg_gen_movi_i64(ret, 1);
    } else if (cond == TCG_COND_NEVER) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
                             TCGV_LOW(arg1), TCGV_HIGH(arg1),
                             TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        } else {
            tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
        }
    }
}

void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
                          TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_setcond_i64(cond, ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_mul_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_div_i64(ret, arg1, arg2);
    }
}

void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
        tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_rem_i64(ret, arg1, arg2);
    }
}

void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_div_i64) {
        tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_divu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_movi_i64(t0, 0);
        tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
        tcg_temp_free_i64(t0);
    } else {
        gen_helper_remu_i64(ret, arg1, arg2);
    }
}

void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext8s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 56);
        tcg_gen_sari_i64(ret, ret, 56);
    }
}

void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext16s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 48);
        tcg_gen_sari_i64(ret, ret, 48);
    }
}

void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
    } else if (TCG_TARGET_HAS_ext32s_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
    } else {
        tcg_gen_shli_i64(ret, arg, 32);
        tcg_gen_sari_i64(ret, ret, 32);
    }
}

void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext8u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffu);
    }
}

void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext16u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffu);
    }
}

void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_ext32u_i64) {
        tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
    } else {
        tcg_gen_andi_i64(ret, arg, 0xffffffffu);
    }
}

/* Note: we assume the six high bytes are set to zero */
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap16_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();

        tcg_gen_ext8u_i64(t0, arg);
        tcg_gen_shli_i64(t0, t0, 8);
        tcg_gen_shri_i64(ret, arg, 8);
        tcg_gen_or_i64(ret, ret, t0);
        tcg_temp_free_i64(t0);
    }
}

/* Note: we assume the four high bytes are set to zero */
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
    } else if (TCG_TARGET_HAS_bswap32_i64) {
        tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
    } else {
        TCGv_i64 t0, t1;
        t0 = tcg_temp_new_i64();
        t1 = tcg_temp_new_i64();

        tcg_gen_shli_i64(t0, arg, 24);
        tcg_gen_ext32u_i64(t0, t0);

        tcg_gen_andi_i64(t1, arg, 0x0000ff00);
        tcg_gen_shli_i64(t1, t1, 8);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 8);
        tcg_gen_andi_i64(t1, t1, 0x0000ff00);
        tcg_gen_or_i64(t0, t0, t1);

        tcg_gen_shri_i64(t1, arg, 24);
        tcg_gen_or_i64(ret, t0, t1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}

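/* The generic bswap64 fallback below assembles the result byte by
   byte: each of the eight bytes is isolated with a mask and a shift,
   moved to its mirrored position, and ORed into the accumulator. */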
1515 void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
1516 {
1517 if (TCG_TARGET_REG_BITS == 32) {
1518 TCGv_i32 t0, t1;
1519 t0 = tcg_temp_new_i32();
1520 t1 = tcg_temp_new_i32();
1521
1522 tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
1523 tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
1524 tcg_gen_mov_i32(TCGV_LOW(ret), t1);
1525 tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
1526 tcg_temp_free_i32(t0);
1527 tcg_temp_free_i32(t1);
1528 } else if (TCG_TARGET_HAS_bswap64_i64) {
1529 tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
1530 } else {
1531 TCGv_i64 t0 = tcg_temp_new_i64();
1532 TCGv_i64 t1 = tcg_temp_new_i64();
1533
1534 tcg_gen_shli_i64(t0, arg, 56);
1535
1536 tcg_gen_andi_i64(t1, arg, 0x0000ff00);
1537 tcg_gen_shli_i64(t1, t1, 40);
1538 tcg_gen_or_i64(t0, t0, t1);
1539
1540 tcg_gen_andi_i64(t1, arg, 0x00ff0000);
1541 tcg_gen_shli_i64(t1, t1, 24);
1542 tcg_gen_or_i64(t0, t0, t1);
1543
1544 tcg_gen_andi_i64(t1, arg, 0xff000000);
1545 tcg_gen_shli_i64(t1, t1, 8);
1546 tcg_gen_or_i64(t0, t0, t1);
1547
1548 tcg_gen_shri_i64(t1, arg, 8);
1549 tcg_gen_andi_i64(t1, t1, 0xff000000);
1550 tcg_gen_or_i64(t0, t0, t1);
1551
1552 tcg_gen_shri_i64(t1, arg, 24);
1553 tcg_gen_andi_i64(t1, t1, 0x00ff0000);
1554 tcg_gen_or_i64(t0, t0, t1);
1555
1556 tcg_gen_shri_i64(t1, arg, 40);
1557 tcg_gen_andi_i64(t1, t1, 0x0000ff00);
1558 tcg_gen_or_i64(t0, t0, t1);
1559
1560 tcg_gen_shri_i64(t1, arg, 56);
1561 tcg_gen_or_i64(ret, t0, t1);
1562 tcg_temp_free_i64(t0);
1563 tcg_temp_free_i64(t1);
1564 }
1565 }
1566
1567 void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
1568 {
1569 if (TCG_TARGET_REG_BITS == 32) {
1570 tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
1571 tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
1572 } else if (TCG_TARGET_HAS_not_i64) {
1573 tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
1574 } else {
1575 tcg_gen_xori_i64(ret, arg, -1);
1576 }
1577 }
1578
1579 void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1580 {
1581 if (TCG_TARGET_REG_BITS == 32) {
1582 tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
1583 tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
1584 } else if (TCG_TARGET_HAS_andc_i64) {
1585 tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
1586 } else {
1587 TCGv_i64 t0 = tcg_temp_new_i64();
1588 tcg_gen_not_i64(t0, arg2);
1589 tcg_gen_and_i64(ret, arg1, t0);
1590 tcg_temp_free_i64(t0);
1591 }
1592 }
1593
1594 void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1595 {
1596 if (TCG_TARGET_REG_BITS == 32) {
1597 tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
1598 tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
1599 } else if (TCG_TARGET_HAS_eqv_i64) {
1600 tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
1601 } else {
1602 tcg_gen_xor_i64(ret, arg1, arg2);
1603 tcg_gen_not_i64(ret, ret);
1604 }
1605 }
1606
1607 void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1608 {
1609 if (TCG_TARGET_REG_BITS == 32) {
1610 tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
1611 tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
1612 } else if (TCG_TARGET_HAS_nand_i64) {
1613 tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
1614 } else {
1615 tcg_gen_and_i64(ret, arg1, arg2);
1616 tcg_gen_not_i64(ret, ret);
1617 }
1618 }
1619
1620 void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1621 {
1622 if (TCG_TARGET_REG_BITS == 32) {
1623 tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
1624 tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
1625 } else if (TCG_TARGET_HAS_nor_i64) {
1626 tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
1627 } else {
1628 tcg_gen_or_i64(ret, arg1, arg2);
1629 tcg_gen_not_i64(ret, ret);
1630 }
1631 }
1632
1633 void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1634 {
1635 if (TCG_TARGET_REG_BITS == 32) {
1636 tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
1637 tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
1638 } else if (TCG_TARGET_HAS_orc_i64) {
1639 tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
1640 } else {
1641 TCGv_i64 t0 = tcg_temp_new_i64();
1642 tcg_gen_not_i64(t0, arg2);
1643 tcg_gen_or_i64(ret, arg1, t0);
1644 tcg_temp_free_i64(t0);
1645 }
1646 }
1647
1648 void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1649 {
1650 if (TCG_TARGET_HAS_rot_i64) {
1651 tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
1652 } else {
1653 TCGv_i64 t0, t1;
1654 t0 = tcg_temp_new_i64();
1655 t1 = tcg_temp_new_i64();
1656 tcg_gen_shl_i64(t0, arg1, arg2);
1657 tcg_gen_subfi_i64(t1, 64, arg2);
1658 tcg_gen_shr_i64(t1, arg1, t1);
1659 tcg_gen_or_i64(ret, t0, t1);
1660 tcg_temp_free_i64(t0);
1661 tcg_temp_free_i64(t1);
1662 }
1663 }
1664
1665 void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
1666 {
1667 tcg_debug_assert(arg2 < 64);
1668 /* some cases can be optimized here */
1669 if (arg2 == 0) {
1670 tcg_gen_mov_i64(ret, arg1);
1671 } else if (TCG_TARGET_HAS_rot_i64) {
1672 TCGv_i64 t0 = tcg_const_i64(arg2);
1673 tcg_gen_rotl_i64(ret, arg1, t0);
1674 tcg_temp_free_i64(t0);
1675 } else {
1676 TCGv_i64 t0, t1;
1677 t0 = tcg_temp_new_i64();
1678 t1 = tcg_temp_new_i64();
1679 tcg_gen_shli_i64(t0, arg1, arg2);
1680 tcg_gen_shri_i64(t1, arg1, 64 - arg2);
1681 tcg_gen_or_i64(ret, t0, t1);
1682 tcg_temp_free_i64(t0);
1683 tcg_temp_free_i64(t1);
1684 }
1685 }
1686
1687 void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1688 {
1689 if (TCG_TARGET_HAS_rot_i64) {
1690 tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
1691 } else {
1692 TCGv_i64 t0, t1;
1693 t0 = tcg_temp_new_i64();
1694 t1 = tcg_temp_new_i64();
1695 tcg_gen_shr_i64(t0, arg1, arg2);
1696 tcg_gen_subfi_i64(t1, 64, arg2);
1697 tcg_gen_shl_i64(t1, arg1, t1);
1698 tcg_gen_or_i64(ret, t0, t1);
1699 tcg_temp_free_i64(t0);
1700 tcg_temp_free_i64(t1);
1701 }
1702 }
1703
1704 void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
1705 {
1706 tcg_debug_assert(arg2 < 64);
1707 /* some cases can be optimized here */
1708 if (arg2 == 0) {
1709 tcg_gen_mov_i64(ret, arg1);
1710 } else {
1711 tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
1712 }
1713 }
1714
1715 void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
1716 unsigned int ofs, unsigned int len)
1717 {
1718 uint64_t mask;
1719 TCGv_i64 t1;
1720
1721 tcg_debug_assert(ofs < 64);
1722 tcg_debug_assert(len > 0);
1723 tcg_debug_assert(len <= 64);
1724 tcg_debug_assert(ofs + len <= 64);
1725
1726 if (len == 64) {
1727 tcg_gen_mov_i64(ret, arg2);
1728 return;
1729 }
1730 if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
1731 tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
1732 return;
1733 }
1734
1735 if (TCG_TARGET_REG_BITS == 32) {
1736 if (ofs >= 32) {
1737 tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
1738 TCGV_LOW(arg2), ofs - 32, len);
1739 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
1740 return;
1741 }
1742 if (ofs + len <= 32) {
1743 tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1),
1744 TCGV_LOW(arg2), ofs, len);
1745 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
1746 return;
1747 }
1748 }
1749
1750 mask = (1ull << len) - 1;
1751 t1 = tcg_temp_new_i64();
1752
1753 if (ofs + len < 64) {
1754 tcg_gen_andi_i64(t1, arg2, mask);
1755 tcg_gen_shli_i64(t1, t1, ofs);
1756 } else {
1757 tcg_gen_shli_i64(t1, arg2, ofs);
1758 }
1759 tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
1760 tcg_gen_or_i64(ret, ret, t1);
1761
1762 tcg_temp_free_i64(t1);
1763 }
1764
1765 void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
1766 unsigned int ofs, unsigned int len)
1767 {
1768 tcg_debug_assert(ofs < 64);
1769 tcg_debug_assert(len > 0);
1770 tcg_debug_assert(len <= 64);
1771 tcg_debug_assert(ofs + len <= 64);
1772
1773 /* Canonicalize certain special cases, even if extract is supported. */
1774 if (ofs + len == 64) {
1775 tcg_gen_shri_i64(ret, arg, 64 - len);
1776 return;
1777 }
1778 if (ofs == 0) {
1779 tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
1780 return;
1781 }
1782
1783 if (TCG_TARGET_REG_BITS == 32) {
1784 /* Look for a 32-bit extract within one of the two words. */
1785 if (ofs >= 32) {
1786 tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
1787 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
1788 return;
1789 }
1790 if (ofs + len <= 32) {
1791 tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
1792 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
1793 return;
1794 }
1795 /* The field is split across two words. One double-word
1796 shift is better than two double-word shifts. */
1797 goto do_shift_and;
1798 }
1799
1800 if (TCG_TARGET_HAS_extract_i64
1801 && TCG_TARGET_extract_i64_valid(ofs, len)) {
1802 tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
1803 return;
1804 }
1805
1806 /* Assume that zero-extension, if available, is cheaper than a shift. */
1807 switch (ofs + len) {
1808 case 32:
1809 if (TCG_TARGET_HAS_ext32u_i64) {
1810 tcg_gen_ext32u_i64(ret, arg);
1811 tcg_gen_shri_i64(ret, ret, ofs);
1812 return;
1813 }
1814 break;
1815 case 16:
1816 if (TCG_TARGET_HAS_ext16u_i64) {
1817 tcg_gen_ext16u_i64(ret, arg);
1818 tcg_gen_shri_i64(ret, ret, ofs);
1819 return;
1820 }
1821 break;
1822 case 8:
1823 if (TCG_TARGET_HAS_ext8u_i64) {
1824 tcg_gen_ext8u_i64(ret, arg);
1825 tcg_gen_shri_i64(ret, ret, ofs);
1826 return;
1827 }
1828 break;
1829 }
1830
1831 /* ??? Ideally we'd know what values are available for immediate AND.
1832 Assume that 8 bits are available, plus the special cases of 16 and 32,
1833 so that we get ext8u, ext16u, and ext32u. */
1834 switch (len) {
1835 case 1 ... 8: case 16: case 32:
1836 do_shift_and:
1837 tcg_gen_shri_i64(ret, arg, ofs);
1838 tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
1839 break;
1840 default:
1841 tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
1842 tcg_gen_shri_i64(ret, ret, 64 - len);
1843 break;
1844 }
1845 }
1846
1847 void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
1848 unsigned int ofs, unsigned int len)
1849 {
1850 tcg_debug_assert(ofs < 64);
1851 tcg_debug_assert(len > 0);
1852 tcg_debug_assert(len <= 64);
1853 tcg_debug_assert(ofs + len <= 64);
1854
1855 /* Canonicalize certain special cases, even if sextract is supported. */
1856 if (ofs + len == 64) {
1857 tcg_gen_sari_i64(ret, arg, 64 - len);
1858 return;
1859 }
1860 if (ofs == 0) {
1861 switch (len) {
1862 case 32:
1863 tcg_gen_ext32s_i64(ret, arg);
1864 return;
1865 case 16:
1866 tcg_gen_ext16s_i64(ret, arg);
1867 return;
1868 case 8:
1869 tcg_gen_ext8s_i64(ret, arg);
1870 return;
1871 }
1872 }
1873
1874 if (TCG_TARGET_REG_BITS == 32) {
1875 /* Look for a 32-bit extract within one of the two words. */
1876 if (ofs >= 32) {
1877 tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
1878 } else if (ofs + len <= 32) {
1879 tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
1880 } else if (ofs == 0) {
1881 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
1882 tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
1883 return;
1884 } else if (len > 32) {
1885 TCGv_i32 t = tcg_temp_new_i32();
1886 /* Extract the bits for the high word normally. */
1887 tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32);
1888 /* Shift the field down for the low part. */
1889 tcg_gen_shri_i64(ret, arg, ofs);
1890 /* Overwrite the shift into the high part. */
1891 tcg_gen_mov_i32(TCGV_HIGH(ret), t);
1892 tcg_temp_free_i32(t);
1893 return;
1894 } else {
1895 /* Shift the field down for the low part, such that the
1896 field sits at the MSB. */
1897 tcg_gen_shri_i64(ret, arg, ofs + len - 32);
1898 /* Shift the field down from the MSB, sign extending. */
1899 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len);
1900 }
1901 /* Sign-extend the field from 32 bits. */
1902 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
1903 return;
1904 }
1905
1906 if (TCG_TARGET_HAS_sextract_i64
1907 && TCG_TARGET_extract_i64_valid(ofs, len)) {
1908 tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
1909 return;
1910 }
1911
1912 /* Assume that sign-extension, if available, is cheaper than a shift. */
1913 switch (ofs + len) {
1914 case 32:
1915 if (TCG_TARGET_HAS_ext32s_i64) {
1916 tcg_gen_ext32s_i64(ret, arg);
1917 tcg_gen_sari_i64(ret, ret, ofs);
1918 return;
1919 }
1920 break;
1921 case 16:
1922 if (TCG_TARGET_HAS_ext16s_i64) {
1923 tcg_gen_ext16s_i64(ret, arg);
1924 tcg_gen_sari_i64(ret, ret, ofs);
1925 return;
1926 }
1927 break;
1928 case 8:
1929 if (TCG_TARGET_HAS_ext8s_i64) {
1930 tcg_gen_ext8s_i64(ret, arg);
1931 tcg_gen_sari_i64(ret, ret, ofs);
1932 return;
1933 }
1934 break;
1935 }
1936 switch (len) {
1937 case 32:
1938 if (TCG_TARGET_HAS_ext32s_i64) {
1939 tcg_gen_shri_i64(ret, arg, ofs);
1940 tcg_gen_ext32s_i64(ret, ret);
1941 return;
1942 }
1943 break;
1944 case 16:
1945 if (TCG_TARGET_HAS_ext16s_i64) {
1946 tcg_gen_shri_i64(ret, arg, ofs);
1947 tcg_gen_ext16s_i64(ret, ret);
1948 return;
1949 }
1950 break;
1951 case 8:
1952 if (TCG_TARGET_HAS_ext8s_i64) {
1953 tcg_gen_shri_i64(ret, arg, ofs);
1954 tcg_gen_ext8s_i64(ret, ret);
1955 return;
1956 }
1957 break;
1958 }
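/* Otherwise, left-justify the field and let the arithmetic right
   shift replicate its sign bit, e.g. ofs=4, len=20: shli 40; sari 44. */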
1959 tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
1960 tcg_gen_sari_i64(ret, ret, 64 - len);
1961 }
1962
1963 void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
1964 TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
1965 {
1966 if (cond == TCG_COND_ALWAYS) {
1967 tcg_gen_mov_i64(ret, v1);
1968 } else if (cond == TCG_COND_NEVER) {
1969 tcg_gen_mov_i64(ret, v2);
1970 } else if (TCG_TARGET_REG_BITS == 32) {
1971 TCGv_i32 t0 = tcg_temp_new_i32();
1972 TCGv_i32 t1 = tcg_temp_new_i32();
1973 tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0,
1974 TCGV_LOW(c1), TCGV_HIGH(c1),
1975 TCGV_LOW(c2), TCGV_HIGH(c2), cond);
1976
1977 if (TCG_TARGET_HAS_movcond_i32) {
1978 tcg_gen_movi_i32(t1, 0);
1979 tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1,
1980 TCGV_LOW(v1), TCGV_LOW(v2));
1981 tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1,
1982 TCGV_HIGH(v1), TCGV_HIGH(v2));
1983 } else {
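/* Without movcond, turn the 0/1 setcond2 result into a 0/-1 mask
   and select bitwise: ret = (v1 & t0) | (v2 & ~t0). */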
1984 tcg_gen_neg_i32(t0, t0);
1985
1986 tcg_gen_and_i32(t1, TCGV_LOW(v1), t0);
1987 tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0);
1988 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1);
1989
1990 tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0);
1991 tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0);
1992 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1);
1993 }
1994 tcg_temp_free_i32(t0);
1995 tcg_temp_free_i32(t1);
1996 } else if (TCG_TARGET_HAS_movcond_i64) {
1997 tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
1998 } else {
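/* Same mask-and-select expansion as the 32-bit fallback above,
   performed on the full 64 bits. */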
1999 TCGv_i64 t0 = tcg_temp_new_i64();
2000 TCGv_i64 t1 = tcg_temp_new_i64();
2001 tcg_gen_setcond_i64(cond, t0, c1, c2);
2002 tcg_gen_neg_i64(t0, t0);
2003 tcg_gen_and_i64(t1, v1, t0);
2004 tcg_gen_andc_i64(ret, v2, t0);
2005 tcg_gen_or_i64(ret, ret, t1);
2006 tcg_temp_free_i64(t0);
2007 tcg_temp_free_i64(t1);
2008 }
2009 }
2010
2011 void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
2012 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
2013 {
2014 if (TCG_TARGET_HAS_add2_i64) {
2015 tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
2016 } else {
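/* The carry into the high part is the unsigned overflow of the low
   addition: (al + bl) < al iff the sum wrapped. */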
2017 TCGv_i64 t0 = tcg_temp_new_i64();
2018 TCGv_i64 t1 = tcg_temp_new_i64();
2019 tcg_gen_add_i64(t0, al, bl);
2020 tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al);
2021 tcg_gen_add_i64(rh, ah, bh);
2022 tcg_gen_add_i64(rh, rh, t1);
2023 tcg_gen_mov_i64(rl, t0);
2024 tcg_temp_free_i64(t0);
2025 tcg_temp_free_i64(t1);
2026 }
2027 }
2028
2029 void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
2030 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
2031 {
2032 if (TCG_TARGET_HAS_sub2_i64) {
2033 tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
2034 } else {
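/* The borrow out of the low part is simply al < bl, unsigned. */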
2035 TCGv_i64 t0 = tcg_temp_new_i64();
2036 TCGv_i64 t1 = tcg_temp_new_i64();
2037 tcg_gen_sub_i64(t0, al, bl);
2038 tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl);
2039 tcg_gen_sub_i64(rh, ah, bh);
2040 tcg_gen_sub_i64(rh, rh, t1);
2041 tcg_gen_mov_i64(rl, t0);
2042 tcg_temp_free_i64(t0);
2043 tcg_temp_free_i64(t1);
2044 }
2045 }
2046
2047 void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
2048 {
2049 if (TCG_TARGET_HAS_mulu2_i64) {
2050 tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
2051 } else if (TCG_TARGET_HAS_muluh_i64) {
2052 TCGv_i64 t = tcg_temp_new_i64();
2053 tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
2054 tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
2055 tcg_gen_mov_i64(rl, t);
2056 tcg_temp_free_i64(t);
2057 } else {
2058 TCGv_i64 t0 = tcg_temp_new_i64();
2059 tcg_gen_mul_i64(t0, arg1, arg2);
2060 gen_helper_muluh_i64(rh, arg1, arg2);
2061 tcg_gen_mov_i64(rl, t0);
2062 tcg_temp_free_i64(t0);
2063 }
2064 }
2065
2066 void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
2067 {
2068 if (TCG_TARGET_HAS_muls2_i64) {
2069 tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
2070 } else if (TCG_TARGET_HAS_mulsh_i64) {
2071 TCGv_i64 t = tcg_temp_new_i64();
2072 tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
2073 tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
2074 tcg_gen_mov_i64(rl, t);
2075 tcg_temp_free_i64(t);
2076 } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
2077 TCGv_i64 t0 = tcg_temp_new_i64();
2078 TCGv_i64 t1 = tcg_temp_new_i64();
2079 TCGv_i64 t2 = tcg_temp_new_i64();
2080 TCGv_i64 t3 = tcg_temp_new_i64();
2081 tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
2082 /* Adjust for negative inputs. */
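/* The signed high part differs from the unsigned one by arg2 when
   arg1 < 0 and by arg1 when arg2 < 0:
   (a * b)_hi_signed = (a * b)_hi_unsigned
   - (a < 0 ? b : 0) - (b < 0 ? a : 0). */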
2083 tcg_gen_sari_i64(t2, arg1, 63);
2084 tcg_gen_sari_i64(t3, arg2, 63);
2085 tcg_gen_and_i64(t2, t2, arg2);
2086 tcg_gen_and_i64(t3, t3, arg1);
2087 tcg_gen_sub_i64(rh, t1, t2);
2088 tcg_gen_sub_i64(rh, rh, t3);
2089 tcg_gen_mov_i64(rl, t0);
2090 tcg_temp_free_i64(t0);
2091 tcg_temp_free_i64(t1);
2092 tcg_temp_free_i64(t2);
2093 tcg_temp_free_i64(t3);
2094 } else {
2095 TCGv_i64 t0 = tcg_temp_new_i64();
2096 tcg_gen_mul_i64(t0, arg1, arg2);
2097 gen_helper_mulsh_i64(rh, arg1, arg2);
2098 tcg_gen_mov_i64(rl, t0);
2099 tcg_temp_free_i64(t0);
2100 }
2101 }
2102
2103 void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
2104 {
2105 TCGv_i64 t0 = tcg_temp_new_i64();
2106 TCGv_i64 t1 = tcg_temp_new_i64();
2107 TCGv_i64 t2 = tcg_temp_new_i64();
2108 tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
2109 /* Adjust for a negative value of the signed input arg1. */
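/* Here only arg1 is signed, so rh = hi_unsigned - (arg1 < 0 ? arg2 : 0). */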
2110 tcg_gen_sari_i64(t2, arg1, 63);
2111 tcg_gen_and_i64(t2, t2, arg2);
2112 tcg_gen_sub_i64(rh, t1, t2);
2113 tcg_gen_mov_i64(rl, t0);
2114 tcg_temp_free_i64(t0);
2115 tcg_temp_free_i64(t1);
2116 tcg_temp_free_i64(t2);
2117 }
2118
2119 /* Size-changing operations. */
2120
2121 void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
2122 {
2123 if (TCG_TARGET_REG_BITS == 32) {
2124 tcg_gen_mov_i32(ret, TCGV_LOW(arg));
2125 } else if (TCG_TARGET_HAS_extrl_i64_i32) {
2126 tcg_gen_op2(&tcg_ctx, INDEX_op_extrl_i64_i32,
2127 GET_TCGV_I32(ret), GET_TCGV_I64(arg));
2128 } else {
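/* On a 64-bit host a 32-bit value occupies the low half of a host
   register, so the i64 temp may be reinterpreted as an i32 directly. */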
2129 tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
2130 }
2131 }
2132
2133 void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
2134 {
2135 if (TCG_TARGET_REG_BITS == 32) {
2136 tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
2137 } else if (TCG_TARGET_HAS_extrh_i64_i32) {
2138 tcg_gen_op2(&tcg_ctx, INDEX_op_extrh_i64_i32,
2139 GET_TCGV_I32(ret), GET_TCGV_I64(arg));
2140 } else {
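/* Shift the high half down into a temp, then reinterpret the temp
   as an i32, as above. */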
2141 TCGv_i64 t = tcg_temp_new_i64();
2142 tcg_gen_shri_i64(t, arg, 32);
2143 tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(t)));
2144 tcg_temp_free_i64(t);
2145 }
2146 }
2147
2148 void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
2149 {
2150 if (TCG_TARGET_REG_BITS == 32) {
2151 tcg_gen_mov_i32(TCGV_LOW(ret), arg);
2152 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
2153 } else {
2154 tcg_gen_op2(&tcg_ctx, INDEX_op_extu_i32_i64,
2155 GET_TCGV_I64(ret), GET_TCGV_I32(arg));
2156 }
2157 }
2158
2159 void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
2160 {
2161 if (TCG_TARGET_REG_BITS == 32) {
2162 tcg_gen_mov_i32(TCGV_LOW(ret), arg);
2163 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
2164 } else {
2165 tcg_gen_op2(&tcg_ctx, INDEX_op_ext_i32_i64,
2166 GET_TCGV_I64(ret), GET_TCGV_I32(arg));
2167 }
2168 }
2169
2170 void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
2171 {
2172 TCGv_i64 tmp;
2173
2174 if (TCG_TARGET_REG_BITS == 32) {
2175 tcg_gen_mov_i32(TCGV_LOW(dest), low);
2176 tcg_gen_mov_i32(TCGV_HIGH(dest), high);
2177 return;
2178 }
2179
2180 tmp = tcg_temp_new_i64();
2181 /* These extensions are only needed for type correctness.
2182 We may be able to do better given target-specific information. */
2183 tcg_gen_extu_i32_i64(tmp, high);
2184 tcg_gen_extu_i32_i64(dest, low);
2185 /* If deposit is available, use it. Otherwise rely on the fact
2186 that both inputs were zero-extended above. */
2187 if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
2188 tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
2189 } else {
2190 tcg_gen_shli_i64(tmp, tmp, 32);
2191 tcg_gen_or_i64(dest, dest, tmp);
2192 }
2193 tcg_temp_free_i64(tmp);
2194 }
2195
2196 void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg)
2197 {
2198 if (TCG_TARGET_REG_BITS == 32) {
2199 tcg_gen_mov_i32(lo, TCGV_LOW(arg));
2200 tcg_gen_mov_i32(hi, TCGV_HIGH(arg));
2201 } else {
2202 tcg_gen_extrl_i64_i32(lo, arg);
2203 tcg_gen_extrh_i64_i32(hi, arg);
2204 }
2205 }
2206
2207 void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
2208 {
2209 tcg_gen_ext32u_i64(lo, arg);
2210 tcg_gen_shri_i64(hi, arg, 32);
2211 }
2212
2213 /* QEMU specific operations. */
2214
2215 void tcg_gen_goto_tb(unsigned idx)
2216 {
2217 /* We only support two chained exits. */
2218 tcg_debug_assert(idx <= 1);
2219 #ifdef CONFIG_DEBUG_TCG
2220 /* Verify that we haven't seen this numbered exit before. */
2221 tcg_debug_assert((tcg_ctx.goto_tb_issue_mask & (1 << idx)) == 0);
2222 tcg_ctx.goto_tb_issue_mask |= 1 << idx;
2223 #endif
2224 tcg_gen_op1i(INDEX_op_goto_tb, idx);
2225 }
2226
2227 static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
2228 {
2229 /* Trigger the asserts inside get_alignment_bits as early as possible. */
2230 (void)get_alignment_bits(op);
2231
2232 switch (op & MO_SIZE) {
2233 case MO_8:
2234 op &= ~MO_BSWAP;
2235 break;
2236 case MO_16:
2237 break;
2238 case MO_32:
2239 if (!is64) {
2240 op &= ~MO_SIGN;
2241 }
2242 break;
2243 case MO_64:
2244 if (!is64) {
2245 tcg_abort();
2246 }
2247 break;
2248 }
2249 if (st) {
2250 op &= ~MO_SIGN;
2251 }
2252 return op;
2253 }
2254
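/* Emit a qemu_ld/st opcode, passing the value and the guest address
   as one or two host words each, depending on TCG_TARGET_REG_BITS
   and TARGET_LONG_BITS. */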
2255 static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
2256 TCGMemOp memop, TCGArg idx)
2257 {
2258 TCGMemOpIdx oi = make_memop_idx(memop, idx);
2259 #if TARGET_LONG_BITS == 32
2260 tcg_gen_op3i_i32(opc, val, addr, oi);
2261 #else
2262 if (TCG_TARGET_REG_BITS == 32) {
2263 tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
2264 } else {
2265 tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi);
2266 }
2267 #endif
2268 }
2269
2270 static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
2271 TCGMemOp memop, TCGArg idx)
2272 {
2273 TCGMemOpIdx oi = make_memop_idx(memop, idx);
2274 #if TARGET_LONG_BITS == 32
2275 if (TCG_TARGET_REG_BITS == 32) {
2276 tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
2277 } else {
2278 tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi);
2279 }
2280 #else
2281 if (TCG_TARGET_REG_BITS == 32) {
2282 tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
2283 TCGV_LOW(addr), TCGV_HIGH(addr), oi);
2284 } else {
2285 tcg_gen_op3i_i64(opc, val, addr, oi);
2286 }
2287 #endif
2288 }
2289
2290 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2291 {
2292 memop = tcg_canonicalize_memop(memop, 0, 0);
2293 trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
2294 addr, trace_mem_get_info(memop, 0));
2295 gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
2296 }
2297
2298 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2299 {
2300 memop = tcg_canonicalize_memop(memop, 0, 1);
2301 trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
2302 addr, trace_mem_get_info(memop, 1));
2303 gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
2304 }
2305
2306 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2307 {
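/* On a 32-bit host, a load narrower than 64 bits goes through the
   i32 path; the high word is then recreated by sign or zero extension. */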
2308 if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
2309 tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
2310 if (memop & MO_SIGN) {
2311 tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
2312 } else {
2313 tcg_gen_movi_i32(TCGV_HIGH(val), 0);
2314 }
2315 return;
2316 }
2317
2318 memop = tcg_canonicalize_memop(memop, 1, 0);
2319 trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
2320 addr, trace_mem_get_info(memop, 0));
2321 gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
2322 }
2323
2324 void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
2325 {
2326 if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
2327 tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
2328 return;
2329 }
2330
2331 memop = tcg_canonicalize_memop(memop, 1, 1);
2332 trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
2333 addr, trace_mem_get_info(memop, 1));
2334 gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
2335 }
2336
2337 static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
2338 {
2339 switch (opc & MO_SSIZE) {
2340 case MO_SB:
2341 tcg_gen_ext8s_i32(ret, val);
2342 break;
2343 case MO_UB:
2344 tcg_gen_ext8u_i32(ret, val);
2345 break;
2346 case MO_SW:
2347 tcg_gen_ext16s_i32(ret, val);
2348 break;
2349 case MO_UW:
2350 tcg_gen_ext16u_i32(ret, val);
2351 break;
2352 default:
2353 tcg_gen_mov_i32(ret, val);
2354 break;
2355 }
2356 }
2357
2358 static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, TCGMemOp opc)
2359 {
2360 switch (opc & MO_SSIZE) {
2361 case MO_SB:
2362 tcg_gen_ext8s_i64(ret, val);
2363 break;
2364 case MO_UB:
2365 tcg_gen_ext8u_i64(ret, val);
2366 break;
2367 case MO_SW:
2368 tcg_gen_ext16s_i64(ret, val);
2369 break;
2370 case MO_UW:
2371 tcg_gen_ext16u_i64(ret, val);
2372 break;
2373 case MO_SL:
2374 tcg_gen_ext32s_i64(ret, val);
2375 break;
2376 case MO_UL:
2377 tcg_gen_ext32u_i64(ret, val);
2378 break;
2379 default:
2380 tcg_gen_mov_i64(ret, val);
2381 break;
2382 }
2383 }
2384
2385 #ifdef CONFIG_SOFTMMU
2386 typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
2387 TCGv_i32, TCGv_i32, TCGv_i32);
2388 typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
2389 TCGv_i64, TCGv_i64, TCGv_i32);
2390 typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
2391 TCGv_i32, TCGv_i32);
2392 typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
2393 TCGv_i64, TCGv_i32);
2394 #else
2395 typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32);
2396 typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64);
2397 typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
2398 typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
2399 #endif
2400
2401 #ifdef CONFIG_ATOMIC64
2402 # define WITH_ATOMIC64(X) X,
2403 #else
2404 # define WITH_ATOMIC64(X)
2405 #endif
2406
2407 static void * const table_cmpxchg[16] = {
2408 [MO_8] = gen_helper_atomic_cmpxchgb,
2409 [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
2410 [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
2411 [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
2412 [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
2413 WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
2414 WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
2415 };
2416
2417 void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
2418 TCGv_i32 newv, TCGArg idx, TCGMemOp memop)
2419 {
2420 memop = tcg_canonicalize_memop(memop, 0, 0);
2421
2422 if (!parallel_cpus) {
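/* CPUs execute serially, so a plain load / compare-select / store
   sequence cannot be observed mid-update. */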
2423 TCGv_i32 t1 = tcg_temp_new_i32();
2424 TCGv_i32 t2 = tcg_temp_new_i32();
2425
2426 tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
2427
2428 tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
2429 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
2430 tcg_gen_qemu_st_i32(t2, addr, idx, memop);
2431 tcg_temp_free_i32(t2);
2432
2433 if (memop & MO_SIGN) {
2434 tcg_gen_ext_i32(retv, t1, memop);
2435 } else {
2436 tcg_gen_mov_i32(retv, t1);
2437 }
2438 tcg_temp_free_i32(t1);
2439 } else {
2440 gen_atomic_cx_i32 gen;
2441
2442 gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
2443 tcg_debug_assert(gen != NULL);
2444
2445 #ifdef CONFIG_SOFTMMU
2446 {
2447 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
2448 gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
2449 tcg_temp_free_i32(oi);
2450 }
2451 #else
2452 gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
2453 #endif
2454
2455 if (memop & MO_SIGN) {
2456 tcg_gen_ext_i32(retv, retv, memop);
2457 }
2458 }
2459 }
2460
2461 void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
2462 TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
2463 {
2464 memop = tcg_canonicalize_memop(memop, 1, 0);
2465
2466 if (!parallel_cpus) {
2467 TCGv_i64 t1 = tcg_temp_new_i64();
2468 TCGv_i64 t2 = tcg_temp_new_i64();
2469
2470 tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
2471
2472 tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
2473 tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
2474 tcg_gen_qemu_st_i64(t2, addr, idx, memop);
2475 tcg_temp_free_i64(t2);
2476
2477 if (memop & MO_SIGN) {
2478 tcg_gen_ext_i64(retv, t1, memop);
2479 } else {
2480 tcg_gen_mov_i64(retv, t1);
2481 }
2482 tcg_temp_free_i64(t1);
2483 } else if ((memop & MO_SIZE) == MO_64) {
2484 #ifdef CONFIG_ATOMIC64
2485 gen_atomic_cx_i64 gen;
2486
2487 gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
2488 tcg_debug_assert(gen != NULL);
2489
2490 #ifdef CONFIG_SOFTMMU
2491 {
2492 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
2493 gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
2494 tcg_temp_free_i32(oi);
2495 }
2496 #else
2497 gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
2498 #endif
2499 #else
2500 gen_helper_exit_atomic(tcg_ctx.tcg_env);
2501 #endif /* CONFIG_ATOMIC64 */
2502 } else {
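/* Parallel, but narrower than 64 bits: funnel through the 32-bit
   cmpxchg and widen the result back to 64 bits. */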
2503 TCGv_i32 c32 = tcg_temp_new_i32();
2504 TCGv_i32 n32 = tcg_temp_new_i32();
2505 TCGv_i32 r32 = tcg_temp_new_i32();
2506
2507 tcg_gen_extrl_i64_i32(c32, cmpv);
2508 tcg_gen_extrl_i64_i32(n32, newv);
2509 tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
2510 tcg_temp_free_i32(c32);
2511 tcg_temp_free_i32(n32);
2512
2513 tcg_gen_extu_i32_i64(retv, r32);
2514 tcg_temp_free_i32(r32);
2515
2516 if (memop & MO_SIGN) {
2517 tcg_gen_ext_i64(retv, retv, memop);
2518 }
2519 }
2520 }
2521
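/* Non-atomic read-modify-write expansion. The new_val flag selects
   whether ret receives the updated value (op-and-fetch) or the
   original memory value (fetch-and-op). */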
2522 static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
2523 TCGArg idx, TCGMemOp memop, bool new_val,
2524 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
2525 {
2526 TCGv_i32 t1 = tcg_temp_new_i32();
2527 TCGv_i32 t2 = tcg_temp_new_i32();
2528
2529 memop = tcg_canonicalize_memop(memop, 0, 0);
2530
2531 tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
2532 gen(t2, t1, val);
2533 tcg_gen_qemu_st_i32(t2, addr, idx, memop);
2534
2535 tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
2536 tcg_temp_free_i32(t1);
2537 tcg_temp_free_i32(t2);
2538 }
2539
2540 static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
2541 TCGArg idx, TCGMemOp memop, void * const table[])
2542 {
2543 gen_atomic_op_i32 gen;
2544
2545 memop = tcg_canonicalize_memop(memop, 0, 0);
2546
2547 gen = table[memop & (MO_SIZE | MO_BSWAP)];
2548 tcg_debug_assert(gen != NULL);
2549
2550 #ifdef CONFIG_SOFTMMU
2551 {
2552 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
2553 gen(ret, tcg_ctx.tcg_env, addr, val, oi);
2554 tcg_temp_free_i32(oi);
2555 }
2556 #else
2557 gen(ret, tcg_ctx.tcg_env, addr, val);
2558 #endif
2559
2560 if (memop & MO_SIGN) {
2561 tcg_gen_ext_i32(ret, ret, memop);
2562 }
2563 }
2564
2565 static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
2566 TCGArg idx, TCGMemOp memop, bool new_val,
2567 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
2568 {
2569 TCGv_i64 t1 = tcg_temp_new_i64();
2570 TCGv_i64 t2 = tcg_temp_new_i64();
2571
2572 memop = tcg_canonicalize_memop(memop, 1, 0);
2573
2574 tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
2575 gen(t2, t1, val);
2576 tcg_gen_qemu_st_i64(t2, addr, idx, memop);
2577
2578 tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
2579 tcg_temp_free_i64(t1);
2580 tcg_temp_free_i64(t2);
2581 }
2582
2583 static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
2584 TCGArg idx, TCGMemOp memop, void * const table[])
2585 {
2586 memop = tcg_canonicalize_memop(memop, 1, 0);
2587
2588 if ((memop & MO_SIZE) == MO_64) {
2589 #ifdef CONFIG_ATOMIC64
2590 gen_atomic_op_i64 gen;
2591
2592 gen = table[memop & (MO_SIZE | MO_BSWAP)];
2593 tcg_debug_assert(gen != NULL);
2594
2595 #ifdef CONFIG_SOFTMMU
2596 {
2597 TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop & ~MO_SIGN, idx));
2598 gen(ret, tcg_ctx.tcg_env, addr, val, oi);
2599 tcg_temp_free_i32(oi);
2600 }
2601 #else
2602 gen(ret, tcg_ctx.tcg_env, addr, val);
2603 #endif
2604 #else
2605 gen_helper_exit_atomic(tcg_ctx.tcg_env);
2606 #endif /* CONFIG_ATOMIC64 */
2607 } else {
2608 TCGv_i32 v32 = tcg_temp_new_i32();
2609 TCGv_i32 r32 = tcg_temp_new_i32();
2610
2611 tcg_gen_extrl_i64_i32(v32, val);
2612 do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
2613 tcg_temp_free_i32(v32);
2614
2615 tcg_gen_extu_i32_i64(ret, r32);
2616 tcg_temp_free_i32(r32);
2617
2618 if (memop & MO_SIGN) {
2619 tcg_gen_ext_i64(ret, ret, memop);
2620 }
2621 }
2622 }
2623
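/* Instantiate the fetch_add family: each NAME gets a helper table
   indexed by MO_SIZE | MO_BSWAP, plus i32/i64 front ends that choose
   between the atomic helpers and the non-atomic expansion based on
   parallel_cpus. */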
2624 #define GEN_ATOMIC_HELPER(NAME, OP, NEW) \
2625 static void * const table_##NAME[16] = { \
2626 [MO_8] = gen_helper_atomic_##NAME##b, \
2627 [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \
2628 [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \
2629 [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \
2630 [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \
2631 WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \
2632 WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \
2633 }; \
2634 void tcg_gen_atomic_##NAME##_i32 \
2635 (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
2636 { \
2637 if (parallel_cpus) { \
2638 do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
2639 } else { \
2640 do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
2641 tcg_gen_##OP##_i32); \
2642 } \
2643 } \
2644 void tcg_gen_atomic_##NAME##_i64 \
2645 (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
2646 { \
2647 if (parallel_cpus) { \
2648 do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
2649 } else { \
2650 do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
2651 tcg_gen_##OP##_i64); \
2652 } \
2653 }
2654
2655 GEN_ATOMIC_HELPER(fetch_add, add, 0)
2656 GEN_ATOMIC_HELPER(fetch_and, and, 0)
2657 GEN_ATOMIC_HELPER(fetch_or, or, 0)
2658 GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
2659
2660 GEN_ATOMIC_HELPER(add_fetch, add, 1)
2661 GEN_ATOMIC_HELPER(and_fetch, and, 1)
2662 GEN_ATOMIC_HELPER(or_fetch, or, 1)
2663 GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
2664
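/* xchg is expanded through the same machinery, with an "operation"
   that ignores the loaded value and simply yields the value to store. */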
2665 static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
2666 {
2667 tcg_gen_mov_i32(r, b);
2668 }
2669
2670 static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
2671 {
2672 tcg_gen_mov_i64(r, b);
2673 }
2674
2675 GEN_ATOMIC_HELPER(xchg, mov2, 0)
2676
2677 #undef GEN_ATOMIC_HELPER