[mirror_qemu.git] target/hexagon/genptr.c
Commit: Hexagon (target/hexagon) Add overrides for jumpr31 instructions
1 /*
2 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include "qemu/osdep.h"
19 #include "cpu.h"
20 #include "internal.h"
21 #include "tcg/tcg-op.h"
22 #include "tcg/tcg-op-gvec.h"
23 #include "insn.h"
24 #include "opcodes.h"
25 #include "translate.h"
26 #define QEMU_GENERATE /* Used internally by macros.h */
27 #include "macros.h"
28 #include "mmvec/macros.h"
29 #undef QEMU_GENERATE
30 #include "gen_tcg.h"
31 #include "gen_tcg_hvx.h"
32 #include "genptr.h"
33
34 TCGv gen_read_reg(TCGv result, int num)
35 {
36 tcg_gen_mov_tl(result, hex_gpr[num]);
37 return result;
38 }
39
40 TCGv gen_read_preg(TCGv pred, uint8_t num)
41 {
42 tcg_gen_mov_tl(pred, hex_pred[num]);
43 return pred;
44 }
45
46 #define IMMUTABLE (~0)
47
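/*
 * Immutable-bit masks for the per-thread registers.  A set bit means the
 * guest cannot modify that bit of the register; gen_masked_reg_write()
 * uses these masks to preserve the protected bits on writes.  IMMUTABLE
 * marks the whole register as read-only.
 */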
48 static const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = {
49 [HEX_REG_USR] = 0xc13000c0,
50 [HEX_REG_PC] = IMMUTABLE,
51 [HEX_REG_GP] = 0x3f,
52 [HEX_REG_UPCYCLELO] = IMMUTABLE,
53 [HEX_REG_UPCYCLEHI] = IMMUTABLE,
54 [HEX_REG_UTIMERLO] = IMMUTABLE,
55 [HEX_REG_UTIMERHI] = IMMUTABLE,
56 };
57
58 static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val,
59 target_ulong reg_mask)
60 {
61 if (reg_mask) {
62 TCGv tmp = tcg_temp_new();
63
64 /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */
65 tcg_gen_andi_tl(new_val, new_val, ~reg_mask);
66 tcg_gen_andi_tl(tmp, cur_val, reg_mask);
67 tcg_gen_or_tl(new_val, new_val, tmp);
68 }
69 }
70
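/*
 * Stage a predicated write to register rnum: hex_new_value[rnum] is only
 * updated if the instruction's slot has not been cancelled.
 */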
71 static inline void gen_log_predicated_reg_write(int rnum, TCGv val,
72 uint32_t slot)
73 {
74 TCGv zero = tcg_constant_tl(0);
75 TCGv slot_mask = tcg_temp_new();
76
77 tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
78 tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero,
79 val, hex_new_value[rnum]);
80 if (HEX_DEBUG) {
81 /*
82 * Do this so HELPER(debug_commit_end) will know
83 *
84 * Note that slot_mask indicates the value is not written
85 * (i.e., slot was cancelled), so we create a true/false value before
86 * or'ing with hex_reg_written[rnum].
87 */
88 tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
89 tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
90 }
91 }
92
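/*
 * Stage an unconditional register write: apply the immutable-bit mask and
 * record the value in hex_new_value[rnum] to be committed with the packet.
 */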
93 void gen_log_reg_write(int rnum, TCGv val)
94 {
95 const target_ulong reg_mask = reg_immut_masks[rnum];
96
97 gen_masked_reg_write(val, hex_gpr[rnum], reg_mask);
98 tcg_gen_mov_tl(hex_new_value[rnum], val);
99 if (HEX_DEBUG) {
100 /* Do this so HELPER(debug_commit_end) will know */
101 tcg_gen_movi_tl(hex_reg_written[rnum], 1);
102 }
103 }
104
105 static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val,
106 uint32_t slot)
107 {
108 TCGv val32 = tcg_temp_new();
109 TCGv zero = tcg_constant_tl(0);
110 TCGv slot_mask = tcg_temp_new();
111
112 tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
113 /* Low word */
114 tcg_gen_extrl_i64_i32(val32, val);
115 tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum],
116 slot_mask, zero,
117 val32, hex_new_value[rnum]);
118 /* High word */
119 tcg_gen_extrh_i64_i32(val32, val);
120 tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1],
121 slot_mask, zero,
122 val32, hex_new_value[rnum + 1]);
123 if (HEX_DEBUG) {
124 /*
125 * Do this so HELPER(debug_commit_end) will know
126 *
127 * Note that slot_mask indicates the value is not written
128 * (i.e., slot was cancelled), so we create a true/false value before
129 * or'ing with hex_reg_written[rnum].
130 */
131 tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
132 tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
133 tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1],
134 slot_mask);
135 }
136 }
137
138 static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
139 {
140 const target_ulong reg_mask_low = reg_immut_masks[rnum];
141 const target_ulong reg_mask_high = reg_immut_masks[rnum + 1];
142 TCGv val32 = tcg_temp_new();
143
144 /* Low word */
145 tcg_gen_extrl_i64_i32(val32, val);
146 gen_masked_reg_write(val32, hex_gpr[rnum], reg_mask_low);
147 tcg_gen_mov_tl(hex_new_value[rnum], val32);
148 if (HEX_DEBUG) {
149 /* Do this so HELPER(debug_commit_end) will know */
150 tcg_gen_movi_tl(hex_reg_written[rnum], 1);
151 }
152
153 /* High word */
154 tcg_gen_extrh_i64_i32(val32, val);
155 gen_masked_reg_write(val32, hex_gpr[rnum + 1], reg_mask_high);
156 tcg_gen_mov_tl(hex_new_value[rnum + 1], val32);
157 if (HEX_DEBUG) {
158 /* Do this so HELPER(debug_commit_end) will know */
159 tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
160 }
161 }
162
163 void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
164 {
165 TCGv base_val = tcg_temp_new();
166
167 tcg_gen_andi_tl(base_val, val, 0xff);
168
169 /*
170 * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
171 *
172 * Multiple writes to the same preg are and'ed together
173 * If this is the first predicate write in the packet, do a
174 * straight assignment. Otherwise, do an and.
175 */
176 if (!test_bit(pnum, ctx->pregs_written)) {
177 tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
178 } else {
179 tcg_gen_and_tl(hex_new_pred_value[pnum],
180 hex_new_pred_value[pnum], base_val);
181 }
182 tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);
183 }
184
185 static inline void gen_read_p3_0(TCGv control_reg)
186 {
187 tcg_gen_movi_tl(control_reg, 0);
188 for (int i = 0; i < NUM_PREGS; i++) {
189 tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
190 }
191 }
192
193 /*
194 * Certain control registers require special handling on read
195 * HEX_REG_P3_0_ALIASED aliased to the predicate registers
196 * -> concat the 4 predicate registers together
197 * HEX_REG_PC actual value stored in DisasContext
198 * -> assign from ctx->base.pc_next
199 * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext
200 * -> add current TB changes to existing reg value
201 */
202 static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
203 TCGv dest)
204 {
205 if (reg_num == HEX_REG_P3_0_ALIASED) {
206 gen_read_p3_0(dest);
207 } else if (reg_num == HEX_REG_PC) {
208 tcg_gen_movi_tl(dest, ctx->base.pc_next);
209 } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
210 tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
211 ctx->num_packets);
212 } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
213 tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
214 ctx->num_insns);
215 } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
216 tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
217 ctx->num_hvx_insns);
218 } else {
219 tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
220 }
221 }
222
223 static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
224 TCGv_i64 dest)
225 {
226 if (reg_num == HEX_REG_P3_0_ALIASED) {
227 TCGv p3_0 = tcg_temp_new();
228 gen_read_p3_0(p3_0);
229 tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
230 } else if (reg_num == HEX_REG_PC - 1) {
231 TCGv pc = tcg_constant_tl(ctx->base.pc_next);
232 tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
233 } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
234 TCGv pkt_cnt = tcg_temp_new();
235 TCGv insn_cnt = tcg_temp_new();
236 tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
237 ctx->num_packets);
238 tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
239 ctx->num_insns);
240 tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
241 } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
242 TCGv hvx_cnt = tcg_temp_new();
243 tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
244 ctx->num_hvx_insns);
245 tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
246 } else {
247 tcg_gen_concat_i32_i64(dest,
248 hex_gpr[reg_num],
249 hex_gpr[reg_num + 1]);
250 }
251 }
252
253 static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg)
254 {
255 TCGv hex_p8 = tcg_temp_new();
256 for (int i = 0; i < NUM_PREGS; i++) {
257 tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
258 gen_log_pred_write(ctx, i, hex_p8);
259 ctx_log_pred_write(ctx, i);
260 }
261 }
262
263 /*
264 * Certain control registers require special handling on write
265 * HEX_REG_P3_0_ALIASED aliased to the predicate registers
266 * -> break the value across 4 predicate registers
267 * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext
268 * -> clear the changes
269 */
270 static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
271 TCGv val)
272 {
273 if (reg_num == HEX_REG_P3_0_ALIASED) {
274 gen_write_p3_0(ctx, val);
275 } else {
276 gen_log_reg_write(reg_num, val);
277 ctx_log_reg_write(ctx, reg_num);
278 if (reg_num == HEX_REG_QEMU_PKT_CNT) {
279 ctx->num_packets = 0;
280 }
281 if (reg_num == HEX_REG_QEMU_INSN_CNT) {
282 ctx->num_insns = 0;
283 }
284 if (reg_num == HEX_REG_QEMU_HVX_CNT) {
285 ctx->num_hvx_insns = 0;
286 }
287 }
288 }
289
290 static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
291 TCGv_i64 val)
292 {
293 if (reg_num == HEX_REG_P3_0_ALIASED) {
294 TCGv val32 = tcg_temp_new();
295 tcg_gen_extrl_i64_i32(val32, val);
296 gen_write_p3_0(ctx, val32);
297 tcg_gen_extrh_i64_i32(val32, val);
298 gen_log_reg_write(reg_num + 1, val32);
299 ctx_log_reg_write(ctx, reg_num + 1);
300 } else {
301 gen_log_reg_write_pair(reg_num, val);
302 ctx_log_reg_write_pair(ctx, reg_num);
303 if (reg_num == HEX_REG_QEMU_PKT_CNT) {
304 ctx->num_packets = 0;
305 ctx->num_insns = 0;
306 }
307 if (reg_num == HEX_REG_QEMU_HVX_CNT) {
308 ctx->num_hvx_insns = 0;
309 }
310 }
311 }
312
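/* Helpers to extract or insert byte/halfword N of a 32- or 64-bit value */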
313 TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
314 {
315 if (sign) {
316 tcg_gen_sextract_tl(result, src, N * 8, 8);
317 } else {
318 tcg_gen_extract_tl(result, src, N * 8, 8);
319 }
320 return result;
321 }
322
323 TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
324 {
325 TCGv_i64 res64 = tcg_temp_new_i64();
326 if (sign) {
327 tcg_gen_sextract_i64(res64, src, N * 8, 8);
328 } else {
329 tcg_gen_extract_i64(res64, src, N * 8, 8);
330 }
331 tcg_gen_extrl_i64_i32(result, res64);
332
333 return result;
334 }
335
336 TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
337 {
338 if (sign) {
339 tcg_gen_sextract_tl(result, src, N * 16, 16);
340 } else {
341 tcg_gen_extract_tl(result, src, N * 16, 16);
342 }
343 return result;
344 }
345
346 void gen_set_half(int N, TCGv result, TCGv src)
347 {
348 tcg_gen_deposit_tl(result, result, src, N * 16, 16);
349 }
350
351 void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
352 {
353 TCGv_i64 src64 = tcg_temp_new_i64();
354 tcg_gen_extu_i32_i64(src64, src);
355 tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
356 }
357
358 void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
359 {
360 TCGv_i64 src64 = tcg_temp_new_i64();
361 tcg_gen_extu_i32_i64(src64, src);
362 tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
363 }
364
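/*
 * Load-locked/store-conditional support: a locked load records the address
 * and loaded value in hex_llsc_addr/hex_llsc_val; the matching
 * store-conditional succeeds only if the address still matches and the
 * memory contents are unchanged, checked with an atomic compare-and-swap.
 */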
365 static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
366 {
367 tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
368 tcg_gen_mov_tl(hex_llsc_addr, vaddr);
369 tcg_gen_mov_tl(hex_llsc_val, dest);
370 }
371
372 static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
373 {
374 tcg_gen_qemu_ld64(dest, vaddr, mem_index);
375 tcg_gen_mov_tl(hex_llsc_addr, vaddr);
376 tcg_gen_mov_i64(hex_llsc_val_i64, dest);
377 }
378
379 static inline void gen_store_conditional4(DisasContext *ctx,
380 TCGv pred, TCGv vaddr, TCGv src)
381 {
382 TCGLabel *fail = gen_new_label();
383 TCGLabel *done = gen_new_label();
384 TCGv one, zero, tmp;
385
386 tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);
387
388 one = tcg_constant_tl(0xff);
389 zero = tcg_constant_tl(0);
390 tmp = tcg_temp_new();
391 tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
392 ctx->mem_idx, MO_32);
393 tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
394 one, zero);
395 tcg_gen_br(done);
396
397 gen_set_label(fail);
398 tcg_gen_movi_tl(pred, 0);
399
400 gen_set_label(done);
401 tcg_gen_movi_tl(hex_llsc_addr, ~0);
402 }
403
404 static inline void gen_store_conditional8(DisasContext *ctx,
405 TCGv pred, TCGv vaddr, TCGv_i64 src)
406 {
407 TCGLabel *fail = gen_new_label();
408 TCGLabel *done = gen_new_label();
409 TCGv_i64 one, zero, tmp;
410
411 tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);
412
413 one = tcg_constant_i64(0xff);
414 zero = tcg_constant_i64(0);
415 tmp = tcg_temp_new_i64();
416 tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
417 ctx->mem_idx, MO_64);
418 tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
419 one, zero);
420 tcg_gen_extrl_i64_i32(pred, tmp);
421 tcg_gen_br(done);
422
423 gen_set_label(fail);
424 tcg_gen_movi_tl(pred, 0);
425
426 gen_set_label(done);
427 tcg_gen_movi_tl(hex_llsc_addr, ~0);
428 }
429
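/*
 * Scalar stores are not performed immediately: the address, width, and
 * data are recorded in the hex_store_* arrays and the store is issued
 * when the packet commits.
 */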
430 void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot)
431 {
432 tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
433 tcg_gen_movi_tl(hex_store_width[slot], width);
434 tcg_gen_mov_tl(hex_store_val32[slot], src);
435 }
436
437 void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
438 {
439 gen_store32(vaddr, src, 1, slot);
440 }
441
442 void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
443 {
444 TCGv tmp = tcg_constant_tl(src);
445 gen_store1(cpu_env, vaddr, tmp, slot);
446 }
447
448 void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
449 {
450 gen_store32(vaddr, src, 2, slot);
451 }
452
453 void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
454 {
455 TCGv tmp = tcg_constant_tl(src);
456 gen_store2(cpu_env, vaddr, tmp, slot);
457 }
458
459 void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
460 {
461 gen_store32(vaddr, src, 4, slot);
462 }
463
464 void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
465 {
466 TCGv tmp = tcg_constant_tl(src);
467 gen_store4(cpu_env, vaddr, tmp, slot);
468 }
469
470 void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
471 {
472 tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
473 tcg_gen_movi_tl(hex_store_width[slot], 8);
474 tcg_gen_mov_i64(hex_store_val64[slot], src);
475 }
476
477 void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot)
478 {
479 TCGv_i64 tmp = tcg_constant_i64(src);
480 gen_store8(cpu_env, vaddr, tmp, slot);
481 }
482
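/* Convert a zero/non-zero value to the 8-bit predicate encoding (0x00/0xff) */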
483 TCGv gen_8bitsof(TCGv result, TCGv value)
484 {
485 TCGv zero = tcg_constant_tl(0);
486 TCGv ones = tcg_constant_tl(0xff);
487 tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);
488
489 return result;
490 }
491
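/*
 * Conditionally write a new PC value.  When the packet has multiple changes
 * of flow, only the first taken branch updates the PC (tracked by
 * hex_branch_taken).
 */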
492 static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr,
493 TCGCond cond, TCGv pred)
494 {
495 TCGLabel *pred_false = NULL;
496 if (cond != TCG_COND_ALWAYS) {
497 pred_false = gen_new_label();
498 tcg_gen_brcondi_tl(cond, pred, 0, pred_false);
499 }
500
501 if (ctx->pkt->pkt_has_multi_cof) {
502 /* If there are multiple branches in a packet, ignore the second one */
503 tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC],
504 hex_branch_taken, tcg_constant_tl(0),
505 hex_gpr[HEX_REG_PC], addr);
506 tcg_gen_movi_tl(hex_branch_taken, 1);
507 } else {
508 tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr);
509 }
510
511 if (cond != TCG_COND_ALWAYS) {
512 gen_set_label(pred_false);
513 }
514 }
515
516 static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off,
517 TCGCond cond, TCGv pred)
518 {
519 target_ulong dest = ctx->pkt->pc + pc_off;
520 if (ctx->pkt->pkt_has_multi_cof) {
521 gen_write_new_pc_addr(ctx, tcg_constant_tl(dest), cond, pred);
522 } else {
523 /* Defer this jump to the end of the TB */
524 ctx->branch_cond = TCG_COND_ALWAYS;
525 if (pred != NULL) {
526 ctx->branch_cond = cond;
527 tcg_gen_mov_tl(hex_branch_taken, pred);
528 }
529 ctx->branch_dest = dest;
530 }
531 }
532
533 void gen_set_usr_field(int field, TCGv val)
534 {
535 tcg_gen_deposit_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR],
536 val,
537 reg_field_info[field].offset,
538 reg_field_info[field].width);
539 }
540
541 void gen_set_usr_fieldi(int field, int x)
542 {
543 if (reg_field_info[field].width == 1) {
544 target_ulong bit = 1 << reg_field_info[field].offset;
545 if ((x & 1) == 1) {
546 tcg_gen_ori_tl(hex_new_value[HEX_REG_USR],
547 hex_new_value[HEX_REG_USR],
548 bit);
549 } else {
550 tcg_gen_andi_tl(hex_new_value[HEX_REG_USR],
551 hex_new_value[HEX_REG_USR],
552 ~bit);
553 }
554 } else {
555 TCGv val = tcg_constant_tl(x);
556 gen_set_usr_field(field, val);
557 }
558 }
559
560 static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2)
561 {
562 TCGv one = tcg_constant_tl(0xff);
563 TCGv zero = tcg_constant_tl(0);
564
565 tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero);
566 }
567
568 static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc,
569 TCGCond cond, TCGv pred)
570 {
571 gen_write_new_pc_addr(ctx, dst_pc, cond, pred);
572 }
573
574 static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred)
575 {
576 TCGv LSB = tcg_temp_new();
577 tcg_gen_andi_tl(LSB, pred, 1);
578 gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB);
579 }
580
581 static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred,
582 int pc_off)
583 {
584 gen_write_new_pc_pcrel(ctx, pc_off, cond, pred);
585 }
586
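/*
 * Compound compare-and-jump instructions are handled in two parts: part1
 * computes and logs the predicate, then a second pass reads the new
 * predicate value and performs the conditional jump.
 */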
587 static void gen_cmpnd_cmp_jmp(DisasContext *ctx,
588 int pnum, TCGCond cond1, TCGv arg1, TCGv arg2,
589 TCGCond cond2, int pc_off)
590 {
591 if (ctx->insn->part1) {
592 TCGv pred = tcg_temp_new();
593 gen_compare(cond1, pred, arg1, arg2);
594 gen_log_pred_write(ctx, pnum, pred);
595 } else {
596 TCGv pred = tcg_temp_new();
597 tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
598 gen_cond_jump(ctx, cond2, pred, pc_off);
599 }
600 }
601
602 static void gen_cmpnd_cmp_jmp_t(DisasContext *ctx,
603 int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
604 int pc_off)
605 {
606 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_EQ, pc_off);
607 }
608
609 static void gen_cmpnd_cmp_jmp_f(DisasContext *ctx,
610 int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
611 int pc_off)
612 {
613 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_NE, pc_off);
614 }
615
616 static void gen_cmpnd_cmpi_jmp_t(DisasContext *ctx,
617 int pnum, TCGCond cond, TCGv arg1, int arg2,
618 int pc_off)
619 {
620 TCGv tmp = tcg_constant_tl(arg2);
621 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_EQ, pc_off);
622 }
623
624 static void gen_cmpnd_cmpi_jmp_f(DisasContext *ctx,
625 int pnum, TCGCond cond, TCGv arg1, int arg2,
626 int pc_off)
627 {
628 TCGv tmp = tcg_constant_tl(arg2);
629 gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_NE, pc_off);
630 }
631
632 static void gen_cmpnd_cmp_n1_jmp_t(DisasContext *ctx, int pnum, TCGCond cond,
633 TCGv arg, int pc_off)
634 {
635 gen_cmpnd_cmpi_jmp_t(ctx, pnum, cond, arg, -1, pc_off);
636 }
637
638 static void gen_cmpnd_cmp_n1_jmp_f(DisasContext *ctx, int pnum, TCGCond cond,
639 TCGv arg, int pc_off)
640 {
641 gen_cmpnd_cmpi_jmp_f(ctx, pnum, cond, arg, -1, pc_off);
642 }
643
644 static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx,
645 int pnum, TCGv arg, TCGCond cond, int pc_off)
646 {
647 if (ctx->insn->part1) {
648 TCGv pred = tcg_temp_new();
649 tcg_gen_andi_tl(pred, arg, 1);
650 gen_8bitsof(pred, pred);
651 gen_log_pred_write(ctx, pnum, pred);
652 } else {
653 TCGv pred = tcg_temp_new();
654 tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
655 gen_cond_jump(ctx, cond, pred, pc_off);
656 }
657 }
658
659 static void gen_testbit0_jumpnv(DisasContext *ctx,
660 TCGv arg, TCGCond cond, int pc_off)
661 {
662 TCGv pred = tcg_temp_new();
663 tcg_gen_andi_tl(pred, arg, 1);
664 gen_cond_jump(ctx, cond, pred, pc_off);
665 }
666
667 static void gen_jump(DisasContext *ctx, int pc_off)
668 {
669 gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
670 }
671
672 static void gen_jumpr(DisasContext *ctx, TCGv new_pc)
673 {
674 gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
675 }
676
677 static void gen_call(DisasContext *ctx, int pc_off)
678 {
679 TCGv next_PC =
680 tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes);
681 gen_log_reg_write(HEX_REG_LR, next_PC);
682 gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
683 }
684
685 static void gen_cond_call(DisasContext *ctx, TCGv pred,
686 TCGCond cond, int pc_off)
687 {
688 TCGv next_PC;
689 TCGv lsb = tcg_temp_new();
690 TCGLabel *skip = gen_new_label();
691 tcg_gen_andi_tl(lsb, pred, 1);
692 gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
693 tcg_gen_brcondi_tl(cond, lsb, 0, skip);
694 next_PC =
695 tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes);
696 gen_log_reg_write(HEX_REG_LR, next_PC);
697 gen_set_label(skip);
698 }
699
700 static void gen_endloop0(DisasContext *ctx)
701 {
702 TCGv lpcfg = tcg_temp_new();
703
704 GET_USR_FIELD(USR_LPCFG, lpcfg);
705
706 /*
707 * if (lpcfg == 1) {
708 * hex_new_pred_value[3] = 0xff;
709 * hex_pred_written |= 1 << 3;
710 * }
711 */
712 TCGLabel *label1 = gen_new_label();
713 tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
714 {
715 tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
716 tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
717 }
718 gen_set_label(label1);
719
720 /*
721 * if (lpcfg) {
722 * SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
723 * }
724 */
725 TCGLabel *label2 = gen_new_label();
726 tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
727 {
728 tcg_gen_subi_tl(lpcfg, lpcfg, 1);
729 SET_USR_FIELD(USR_LPCFG, lpcfg);
730 }
731 gen_set_label(label2);
732
733 /*
734 * If we're in a tight loop, we'll do this at the end of the TB to take
735 * advantage of direct block chaining.
736 */
737 if (!ctx->is_tight_loop) {
738 /*
739 * if (hex_gpr[HEX_REG_LC0] > 1) {
740 * PC = hex_gpr[HEX_REG_SA0];
741 * hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
742 * }
743 */
744 TCGLabel *label3 = gen_new_label();
745 tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
746 {
747 gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
748 tcg_gen_subi_tl(hex_new_value[HEX_REG_LC0],
749 hex_gpr[HEX_REG_LC0], 1);
750 }
751 gen_set_label(label3);
752 }
753 }
754
755 static void gen_cmp_jumpnv(DisasContext *ctx,
756 TCGCond cond, TCGv val, TCGv src, int pc_off)
757 {
758 TCGv pred = tcg_temp_new();
759 tcg_gen_setcond_tl(cond, pred, val, src);
760 gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
761 }
762
763 static void gen_cmpi_jumpnv(DisasContext *ctx,
764 TCGCond cond, TCGv val, int src, int pc_off)
765 {
766 TCGv pred = tcg_temp_new();
767 tcg_gen_setcondi_tl(cond, pred, val, src);
768 gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
769 }
770
771 /* Shift left with saturation */
772 static void gen_shl_sat(TCGv dst, TCGv src, TCGv shift_amt)
773 {
774 TCGv sh32 = tcg_temp_new();
775 TCGv dst_sar = tcg_temp_new();
776 TCGv ovf = tcg_temp_new();
777 TCGv satval = tcg_temp_new();
778 TCGv min = tcg_constant_tl(0x80000000);
779 TCGv max = tcg_constant_tl(0x7fffffff);
780
781 /*
782 * Possible values for shift_amt are 0 .. 64
783 * We need special handling for values above 31
784 *
785 * sh32 = shift & 31;
786 * dst = sh32 == shift ? src : 0;
787 * dst <<= sh32;
788 * dst_sar = dst >> sh32;
789 * satval = src < 0 ? min : max;
790 * if (dst_sar != src) {
791 * usr.OVF |= 1;
792 * dst = satval;
793 * }
794 */
795
796 tcg_gen_andi_tl(sh32, shift_amt, 31);
797 tcg_gen_movcond_tl(TCG_COND_EQ, dst, sh32, shift_amt,
798 src, tcg_constant_tl(0));
799 tcg_gen_shl_tl(dst, dst, sh32);
800 tcg_gen_sar_tl(dst_sar, dst, sh32);
801 tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max);
802
803 tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src);
804 tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset);
805 tcg_gen_or_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], ovf);
806
807 tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, dst, satval);
808 }
809
810 static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt)
811 {
812 /*
813 * Shift arithmetic right
814 * Robust when shift_amt is >31 bits
815 */
816 TCGv tmp = tcg_temp_new();
817 tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31));
818 tcg_gen_sar_tl(dst, src, tmp);
819 }
820
821 /* Bidirectional shift right with saturation */
822 static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
823 {
824 TCGv shift_amt = tcg_temp_new();
825 TCGLabel *positive = gen_new_label();
826 TCGLabel *done = gen_new_label();
827
828 tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
829 tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);
830
831 /* Negative shift amount => shift left */
832 tcg_gen_neg_tl(shift_amt, shift_amt);
833 gen_shl_sat(RdV, RsV, shift_amt);
834 tcg_gen_br(done);
835
836 gen_set_label(positive);
837 /* Positive shift amount => shift right */
838 gen_sar(RdV, RsV, shift_amt);
839
840 gen_set_label(done);
841 }
842
843 /* Bidirectional shift left with saturation */
844 static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
845 {
846 TCGv shift_amt = tcg_temp_new();
847 TCGLabel *positive = gen_new_label();
848 TCGLabel *done = gen_new_label();
849
850 tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
851 tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);
852
853 /* Negative shift amount => shift right */
854 tcg_gen_neg_tl(shift_amt, shift_amt);
855 gen_sar(RdV, RsV, shift_amt);
856 tcg_gen_br(done);
857
858 gen_set_label(positive);
859 /* Positive shift amount => shift left */
860 gen_shl_sat(RdV, RsV, shift_amt);
861
862 gen_set_label(done);
863 }
864
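/*
 * Return the env offset to read HVX vector register num from: the committed
 * VRegs[num], or the future/tmp copy when this packet selects the new or
 * tmp value.
 */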
865 static intptr_t vreg_src_off(DisasContext *ctx, int num)
866 {
867 intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);
868
869 if (test_bit(num, ctx->vregs_select)) {
870 offset = ctx_future_vreg_off(ctx, num, 1, false);
871 }
872 if (test_bit(num, ctx->vregs_updated_tmp)) {
873 offset = ctx_tmp_vreg_off(ctx, num, 1, false);
874 }
875 return offset;
876 }
877
878 static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
879 VRegWriteType type, int slot_num,
880 bool is_predicated)
881 {
882 TCGLabel *label_end = NULL;
883 intptr_t dstoff;
884
885 if (is_predicated) {
886 TCGv cancelled = tcg_temp_new();
887 label_end = gen_new_label();
888
889 /* Don't do anything if the slot was cancelled */
890 tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
891 tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
892 }
893
894 if (type != EXT_TMP) {
895 dstoff = ctx_future_vreg_off(ctx, num, 1, true);
896 tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
897 sizeof(MMVector), sizeof(MMVector));
898 tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num);
899 } else {
900 dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
901 tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
902 sizeof(MMVector), sizeof(MMVector));
903 }
904
905 if (is_predicated) {
906 gen_set_label(label_end);
907 }
908 }
909
910 static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
911 VRegWriteType type, int slot_num,
912 bool is_predicated)
913 {
914 gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated);
915 srcoff += sizeof(MMVector);
916 gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated);
917 }
918
919 static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
920 int slot_num, bool is_predicated)
921 {
922 TCGLabel *label_end = NULL;
923 intptr_t dstoff;
924
925 if (is_predicated) {
926 TCGv cancelled = tcg_temp_new();
927 label_end = gen_new_label();
928
929 /* Don't do anything if the slot was cancelled */
930 tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
931 tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
932 }
933
934 dstoff = offsetof(CPUHexagonState, future_QRegs[num]);
935 tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg));
936
937 if (is_predicated) {
938 tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num);
939 gen_set_label(label_end);
940 }
941 }
942
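/*
 * Load a full HVX vector from memory into the CPU state at dstoff,
 * 8 bytes at a time, optionally masking the address to vector alignment.
 */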
943 static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
944 bool aligned)
945 {
946 TCGv_i64 tmp = tcg_temp_new_i64();
947 if (aligned) {
948 tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
949 }
950 for (int i = 0; i < sizeof(MMVector) / 8; i++) {
951 tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
952 tcg_gen_addi_tl(src, src, 8);
953 tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
954 }
955 }
956
957 static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
958 int slot, bool aligned)
959 {
960 intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
961 intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);
962
963 if (is_gather_store_insn(ctx)) {
964 TCGv sl = tcg_constant_tl(slot);
965 gen_helper_gather_store(cpu_env, EA, sl);
966 return;
967 }
968
969 tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
970 if (aligned) {
971 tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
972 ~((int32_t)sizeof(MMVector) - 1));
973 } else {
974 tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
975 }
976 tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));
977
978 /* Copy the data to the vstore buffer */
979 tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
980 /* Set the mask to all 1's */
981 tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
982 }
983
984 static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
985 intptr_t bitsoff, int slot, bool invert)
986 {
987 intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
988 intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);
989
990 tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
991 tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
992 ~((int32_t)sizeof(MMVector) - 1));
993 tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));
994
995 /* Copy the data to the vstore buffer */
996 tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
997 /* Copy the mask */
998 tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
999 if (invert) {
1000 tcg_gen_gvec_not(MO_64, maskoff, maskoff,
1001 sizeof(MMQReg), sizeof(MMQReg));
1002 }
1003 }
1004
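/*
 * Convert a vector to a predicate (QReg) mask: for each size-byte element,
 * the corresponding mask bits are set when the element is non-zero.
 */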
1005 static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
1006 {
1007 TCGv_i64 tmp = tcg_temp_new_i64();
1008 TCGv_i64 word = tcg_temp_new_i64();
1009 TCGv_i64 bits = tcg_temp_new_i64();
1010 TCGv_i64 mask = tcg_temp_new_i64();
1011 TCGv_i64 zero = tcg_constant_i64(0);
1012 TCGv_i64 ones = tcg_constant_i64(~0);
1013
1014 for (int i = 0; i < sizeof(MMVector) / 8; i++) {
1015 tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
1016 tcg_gen_movi_i64(mask, 0);
1017
1018 for (int j = 0; j < 8; j += size) {
1019 tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
1020 tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
1021 tcg_gen_deposit_i64(mask, mask, bits, j, size);
1022 }
1023
1024 tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
1025 }
1026 }
1027
1028 void probe_noshuf_load(TCGv va, int s, int mi)
1029 {
1030 TCGv size = tcg_constant_tl(s);
1031 TCGv mem_idx = tcg_constant_tl(mi);
1032 gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
1033 }
1034
1035 /*
1036 * Note: Since this function might branch, `val` is
1037 * required to be a `tcg_temp_local`.
1038 */
1039 void gen_set_usr_field_if(int field, TCGv val)
1040 {
1041 /* Sets the USR field if `val` is non-zero */
1042 if (reg_field_info[field].width == 1) {
1043 TCGv tmp = tcg_temp_new();
1044 tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width);
1045 tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset);
1046 tcg_gen_or_tl(hex_new_value[HEX_REG_USR],
1047 hex_new_value[HEX_REG_USR],
1048 tmp);
1049 } else {
1050 TCGLabel *skip_label = gen_new_label();
1051 tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label);
1052 gen_set_usr_field(field, val);
1053 gen_set_label(skip_label);
1054 }
1055 }
1056
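/*
 * Saturation helpers: clamp a value to the signed (gen_sat_*) or unsigned
 * (gen_satu_*) range of the given bit width.  The _ovfl variants also set
 * ovfl to 1 when the value was clamped.
 */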
1057 void gen_sat_i32(TCGv dest, TCGv source, int width)
1058 {
1059 TCGv max_val = tcg_constant_tl((1 << (width - 1)) - 1);
1060 TCGv min_val = tcg_constant_tl(-(1 << (width - 1)));
1061 tcg_gen_smin_tl(dest, source, max_val);
1062 tcg_gen_smax_tl(dest, dest, min_val);
1063 }
1064
1065 void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
1066 {
1067 gen_sat_i32(dest, source, width);
1068 tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
1069 }
1070
1071 void gen_satu_i32(TCGv dest, TCGv source, int width)
1072 {
1073 TCGv max_val = tcg_constant_tl((1 << width) - 1);
1074 TCGv zero = tcg_constant_tl(0);
1075 tcg_gen_movcond_tl(TCG_COND_GTU, dest, source, max_val, max_val, source);
1076 tcg_gen_movcond_tl(TCG_COND_LT, dest, source, zero, zero, dest);
1077 }
1078
1079 void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
1080 {
1081 gen_satu_i32(dest, source, width);
1082 tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
1083 }
1084
1085 void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width)
1086 {
1087 TCGv_i64 max_val = tcg_constant_i64((1LL << (width - 1)) - 1LL);
1088 TCGv_i64 min_val = tcg_constant_i64(-(1LL << (width - 1)));
1089 tcg_gen_smin_i64(dest, source, max_val);
1090 tcg_gen_smax_i64(dest, dest, min_val);
1091 }
1092
1093 void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
1094 {
1095 TCGv_i64 ovfl_64;
1096 gen_sat_i64(dest, source, width);
1097 ovfl_64 = tcg_temp_new_i64();
1098 tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
1099 tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
1100 }
1101
1102 void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width)
1103 {
1104 TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL);
1105 TCGv_i64 zero = tcg_constant_i64(0);
1106 tcg_gen_movcond_i64(TCG_COND_GTU, dest, source, max_val, max_val, source);
1107 tcg_gen_movcond_i64(TCG_COND_LT, dest, source, zero, zero, dest);
1108 }
1109
1110 void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
1111 {
1112 TCGv_i64 ovfl_64;
1113 gen_satu_i64(dest, source, width);
1114 ovfl_64 = tcg_temp_new_i64();
1115 tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
1116 tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
1117 }
1118
1119 /* Implements the fADDSAT64 macro in TCG */
1120 void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
1121 {
1122 TCGv_i64 sum = tcg_temp_new_i64();
1123 TCGv_i64 xor = tcg_temp_new_i64();
1124 TCGv_i64 cond1 = tcg_temp_new_i64();
1125 TCGv_i64 cond2 = tcg_temp_new_i64();
1126 TCGv_i64 cond3 = tcg_temp_new_i64();
1127 TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
1128 TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);
1129 TCGv_i64 max_neg = tcg_constant_i64(0x8000000000000000LL);
1130 TCGv_i64 zero = tcg_constant_i64(0);
1131 TCGLabel *no_ovfl_label = gen_new_label();
1132 TCGLabel *ovfl_label = gen_new_label();
1133 TCGLabel *ret_label = gen_new_label();
1134
1135 tcg_gen_add_i64(sum, a, b);
1136 tcg_gen_xor_i64(xor, a, b);
1137
1138 /* if (xor & mask) */
1139 tcg_gen_and_i64(cond1, xor, mask);
1140 tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label);
1141
1142 /* else if ((a ^ sum) & mask) */
1143 tcg_gen_xor_i64(cond2, a, sum);
1144 tcg_gen_and_i64(cond2, cond2, mask);
1145 tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label);
1146 /* fallthrough to no_ovfl_label branch */
1147
1148 /* if branch */
1149 gen_set_label(no_ovfl_label);
1150 tcg_gen_mov_i64(ret, sum);
1151 tcg_gen_br(ret_label);
1152
1153 /* else if branch */
1154 gen_set_label(ovfl_label);
1155 tcg_gen_and_i64(cond3, sum, mask);
1156 tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg);
1157 SET_USR_FIELD(USR_OVF, 1);
1158
1159 gen_set_label(ret_label);
1160 }
1161
1162 #include "tcg_funcs_generated.c.inc"
1163 #include "tcg_func_table_generated.c.inc"