/*
 * target/hexagon/genptr.c — TCG code-generation helpers for the Hexagon target
 */
/*
 * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE /* Used internally by macros.h */
#include "macros.h"
#undef QEMU_GENERATE
#include "mmvec/macros.h"
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"
#include "genptr.h"
34 TCGv
gen_read_reg(TCGv result
, int num
)
36 tcg_gen_mov_tl(result
, hex_gpr
[num
]);
40 TCGv
gen_read_preg(TCGv pred
, uint8_t num
)
42 tcg_gen_mov_tl(pred
, hex_pred
[num
]);
46 static inline void gen_log_predicated_reg_write(int rnum
, TCGv val
,
49 TCGv zero
= tcg_constant_tl(0);
50 TCGv slot_mask
= tcg_temp_new();
52 tcg_gen_andi_tl(slot_mask
, hex_slot_cancelled
, 1 << slot
);
53 tcg_gen_movcond_tl(TCG_COND_EQ
, hex_new_value
[rnum
], slot_mask
, zero
,
54 val
, hex_new_value
[rnum
]);
57 * Do this so HELPER(debug_commit_end) will know
59 * Note that slot_mask indicates the value is not written
60 * (i.e., slot was cancelled), so we create a true/false value before
61 * or'ing with hex_reg_written[rnum].
63 tcg_gen_setcond_tl(TCG_COND_EQ
, slot_mask
, slot_mask
, zero
);
64 tcg_gen_or_tl(hex_reg_written
[rnum
], hex_reg_written
[rnum
], slot_mask
);
67 tcg_temp_free(slot_mask
);
70 void gen_log_reg_write(int rnum
, TCGv val
)
72 tcg_gen_mov_tl(hex_new_value
[rnum
], val
);
74 /* Do this so HELPER(debug_commit_end) will know */
75 tcg_gen_movi_tl(hex_reg_written
[rnum
], 1);
79 static void gen_log_predicated_reg_write_pair(int rnum
, TCGv_i64 val
,
82 TCGv val32
= tcg_temp_new();
83 TCGv zero
= tcg_constant_tl(0);
84 TCGv slot_mask
= tcg_temp_new();
86 tcg_gen_andi_tl(slot_mask
, hex_slot_cancelled
, 1 << slot
);
88 tcg_gen_extrl_i64_i32(val32
, val
);
89 tcg_gen_movcond_tl(TCG_COND_EQ
, hex_new_value
[rnum
],
91 val32
, hex_new_value
[rnum
]);
93 tcg_gen_extrh_i64_i32(val32
, val
);
94 tcg_gen_movcond_tl(TCG_COND_EQ
, hex_new_value
[rnum
+ 1],
96 val32
, hex_new_value
[rnum
+ 1]);
99 * Do this so HELPER(debug_commit_end) will know
101 * Note that slot_mask indicates the value is not written
102 * (i.e., slot was cancelled), so we create a true/false value before
103 * or'ing with hex_reg_written[rnum].
105 tcg_gen_setcond_tl(TCG_COND_EQ
, slot_mask
, slot_mask
, zero
);
106 tcg_gen_or_tl(hex_reg_written
[rnum
], hex_reg_written
[rnum
], slot_mask
);
107 tcg_gen_or_tl(hex_reg_written
[rnum
+ 1], hex_reg_written
[rnum
+ 1],
111 tcg_temp_free(val32
);
112 tcg_temp_free(slot_mask
);
115 static void gen_log_reg_write_pair(int rnum
, TCGv_i64 val
)
118 tcg_gen_extrl_i64_i32(hex_new_value
[rnum
], val
);
120 /* Do this so HELPER(debug_commit_end) will know */
121 tcg_gen_movi_tl(hex_reg_written
[rnum
], 1);
125 tcg_gen_extrh_i64_i32(hex_new_value
[rnum
+ 1], val
);
127 /* Do this so HELPER(debug_commit_end) will know */
128 tcg_gen_movi_tl(hex_reg_written
[rnum
+ 1], 1);
132 void gen_log_pred_write(DisasContext
*ctx
, int pnum
, TCGv val
)
134 TCGv base_val
= tcg_temp_new();
136 tcg_gen_andi_tl(base_val
, val
, 0xff);
139 * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
141 * Multiple writes to the same preg are and'ed together
142 * If this is the first predicate write in the packet, do a
143 * straight assignment. Otherwise, do an and.
145 if (!test_bit(pnum
, ctx
->pregs_written
)) {
146 tcg_gen_mov_tl(hex_new_pred_value
[pnum
], base_val
);
148 tcg_gen_and_tl(hex_new_pred_value
[pnum
],
149 hex_new_pred_value
[pnum
], base_val
);
151 tcg_gen_ori_tl(hex_pred_written
, hex_pred_written
, 1 << pnum
);
153 tcg_temp_free(base_val
);
156 static inline void gen_read_p3_0(TCGv control_reg
)
158 tcg_gen_movi_tl(control_reg
, 0);
159 for (int i
= 0; i
< NUM_PREGS
; i
++) {
160 tcg_gen_deposit_tl(control_reg
, control_reg
, hex_pred
[i
], i
* 8, 8);
165 * Certain control registers require special handling on read
166 * HEX_REG_P3_0 aliased to the predicate registers
167 * -> concat the 4 predicate registers together
168 * HEX_REG_PC actual value stored in DisasContext
169 * -> assign from ctx->base.pc_next
170 * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext
171 * -> add current TB changes to existing reg value
173 static inline void gen_read_ctrl_reg(DisasContext
*ctx
, const int reg_num
,
176 if (reg_num
== HEX_REG_P3_0
) {
178 } else if (reg_num
== HEX_REG_PC
) {
179 tcg_gen_movi_tl(dest
, ctx
->base
.pc_next
);
180 } else if (reg_num
== HEX_REG_QEMU_PKT_CNT
) {
181 tcg_gen_addi_tl(dest
, hex_gpr
[HEX_REG_QEMU_PKT_CNT
],
183 } else if (reg_num
== HEX_REG_QEMU_INSN_CNT
) {
184 tcg_gen_addi_tl(dest
, hex_gpr
[HEX_REG_QEMU_INSN_CNT
],
186 } else if (reg_num
== HEX_REG_QEMU_HVX_CNT
) {
187 tcg_gen_addi_tl(dest
, hex_gpr
[HEX_REG_QEMU_HVX_CNT
],
190 tcg_gen_mov_tl(dest
, hex_gpr
[reg_num
]);
194 static inline void gen_read_ctrl_reg_pair(DisasContext
*ctx
, const int reg_num
,
197 if (reg_num
== HEX_REG_P3_0
) {
198 TCGv p3_0
= tcg_temp_new();
200 tcg_gen_concat_i32_i64(dest
, p3_0
, hex_gpr
[reg_num
+ 1]);
202 } else if (reg_num
== HEX_REG_PC
- 1) {
203 TCGv pc
= tcg_constant_tl(ctx
->base
.pc_next
);
204 tcg_gen_concat_i32_i64(dest
, hex_gpr
[reg_num
], pc
);
205 } else if (reg_num
== HEX_REG_QEMU_PKT_CNT
) {
206 TCGv pkt_cnt
= tcg_temp_new();
207 TCGv insn_cnt
= tcg_temp_new();
208 tcg_gen_addi_tl(pkt_cnt
, hex_gpr
[HEX_REG_QEMU_PKT_CNT
],
210 tcg_gen_addi_tl(insn_cnt
, hex_gpr
[HEX_REG_QEMU_INSN_CNT
],
212 tcg_gen_concat_i32_i64(dest
, pkt_cnt
, insn_cnt
);
213 tcg_temp_free(pkt_cnt
);
214 tcg_temp_free(insn_cnt
);
215 } else if (reg_num
== HEX_REG_QEMU_HVX_CNT
) {
216 TCGv hvx_cnt
= tcg_temp_new();
217 tcg_gen_addi_tl(hvx_cnt
, hex_gpr
[HEX_REG_QEMU_HVX_CNT
],
219 tcg_gen_concat_i32_i64(dest
, hvx_cnt
, hex_gpr
[reg_num
+ 1]);
220 tcg_temp_free(hvx_cnt
);
222 tcg_gen_concat_i32_i64(dest
,
224 hex_gpr
[reg_num
+ 1]);
228 static void gen_write_p3_0(DisasContext
*ctx
, TCGv control_reg
)
230 TCGv hex_p8
= tcg_temp_new();
231 for (int i
= 0; i
< NUM_PREGS
; i
++) {
232 tcg_gen_extract_tl(hex_p8
, control_reg
, i
* 8, 8);
233 gen_log_pred_write(ctx
, i
, hex_p8
);
234 ctx_log_pred_write(ctx
, i
);
236 tcg_temp_free(hex_p8
);
240 * Certain control registers require special handling on write
241 * HEX_REG_P3_0 aliased to the predicate registers
242 * -> break the value across 4 predicate registers
243 * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext
244 * -> clear the changes
246 static inline void gen_write_ctrl_reg(DisasContext
*ctx
, int reg_num
,
249 if (reg_num
== HEX_REG_P3_0
) {
250 gen_write_p3_0(ctx
, val
);
252 gen_log_reg_write(reg_num
, val
);
253 ctx_log_reg_write(ctx
, reg_num
);
254 if (reg_num
== HEX_REG_QEMU_PKT_CNT
) {
255 ctx
->num_packets
= 0;
257 if (reg_num
== HEX_REG_QEMU_INSN_CNT
) {
260 if (reg_num
== HEX_REG_QEMU_HVX_CNT
) {
261 ctx
->num_hvx_insns
= 0;
266 static inline void gen_write_ctrl_reg_pair(DisasContext
*ctx
, int reg_num
,
269 if (reg_num
== HEX_REG_P3_0
) {
270 TCGv val32
= tcg_temp_new();
271 tcg_gen_extrl_i64_i32(val32
, val
);
272 gen_write_p3_0(ctx
, val32
);
273 tcg_gen_extrh_i64_i32(val32
, val
);
274 gen_log_reg_write(reg_num
+ 1, val32
);
275 tcg_temp_free(val32
);
276 ctx_log_reg_write(ctx
, reg_num
+ 1);
278 gen_log_reg_write_pair(reg_num
, val
);
279 ctx_log_reg_write_pair(ctx
, reg_num
);
280 if (reg_num
== HEX_REG_QEMU_PKT_CNT
) {
281 ctx
->num_packets
= 0;
284 if (reg_num
== HEX_REG_QEMU_HVX_CNT
) {
285 ctx
->num_hvx_insns
= 0;
290 TCGv
gen_get_byte(TCGv result
, int N
, TCGv src
, bool sign
)
293 tcg_gen_sextract_tl(result
, src
, N
* 8, 8);
295 tcg_gen_extract_tl(result
, src
, N
* 8, 8);
300 TCGv
gen_get_byte_i64(TCGv result
, int N
, TCGv_i64 src
, bool sign
)
302 TCGv_i64 res64
= tcg_temp_new_i64();
304 tcg_gen_sextract_i64(res64
, src
, N
* 8, 8);
306 tcg_gen_extract_i64(res64
, src
, N
* 8, 8);
308 tcg_gen_extrl_i64_i32(result
, res64
);
309 tcg_temp_free_i64(res64
);
314 TCGv
gen_get_half(TCGv result
, int N
, TCGv src
, bool sign
)
317 tcg_gen_sextract_tl(result
, src
, N
* 16, 16);
319 tcg_gen_extract_tl(result
, src
, N
* 16, 16);
324 void gen_set_half(int N
, TCGv result
, TCGv src
)
326 tcg_gen_deposit_tl(result
, result
, src
, N
* 16, 16);
329 void gen_set_half_i64(int N
, TCGv_i64 result
, TCGv src
)
331 TCGv_i64 src64
= tcg_temp_new_i64();
332 tcg_gen_extu_i32_i64(src64
, src
);
333 tcg_gen_deposit_i64(result
, result
, src64
, N
* 16, 16);
334 tcg_temp_free_i64(src64
);
337 void gen_set_byte_i64(int N
, TCGv_i64 result
, TCGv src
)
339 TCGv_i64 src64
= tcg_temp_new_i64();
340 tcg_gen_extu_i32_i64(src64
, src
);
341 tcg_gen_deposit_i64(result
, result
, src64
, N
* 8, 8);
342 tcg_temp_free_i64(src64
);
345 static inline void gen_load_locked4u(TCGv dest
, TCGv vaddr
, int mem_index
)
347 tcg_gen_qemu_ld32u(dest
, vaddr
, mem_index
);
348 tcg_gen_mov_tl(hex_llsc_addr
, vaddr
);
349 tcg_gen_mov_tl(hex_llsc_val
, dest
);
352 static inline void gen_load_locked8u(TCGv_i64 dest
, TCGv vaddr
, int mem_index
)
354 tcg_gen_qemu_ld64(dest
, vaddr
, mem_index
);
355 tcg_gen_mov_tl(hex_llsc_addr
, vaddr
);
356 tcg_gen_mov_i64(hex_llsc_val_i64
, dest
);
359 static inline void gen_store_conditional4(DisasContext
*ctx
,
360 TCGv pred
, TCGv vaddr
, TCGv src
)
362 TCGLabel
*fail
= gen_new_label();
363 TCGLabel
*done
= gen_new_label();
366 tcg_gen_brcond_tl(TCG_COND_NE
, vaddr
, hex_llsc_addr
, fail
);
368 one
= tcg_constant_tl(0xff);
369 zero
= tcg_constant_tl(0);
370 tmp
= tcg_temp_new();
371 tcg_gen_atomic_cmpxchg_tl(tmp
, hex_llsc_addr
, hex_llsc_val
, src
,
372 ctx
->mem_idx
, MO_32
);
373 tcg_gen_movcond_tl(TCG_COND_EQ
, pred
, tmp
, hex_llsc_val
,
379 tcg_gen_movi_tl(pred
, 0);
382 tcg_gen_movi_tl(hex_llsc_addr
, ~0);
385 static inline void gen_store_conditional8(DisasContext
*ctx
,
386 TCGv pred
, TCGv vaddr
, TCGv_i64 src
)
388 TCGLabel
*fail
= gen_new_label();
389 TCGLabel
*done
= gen_new_label();
390 TCGv_i64 one
, zero
, tmp
;
392 tcg_gen_brcond_tl(TCG_COND_NE
, vaddr
, hex_llsc_addr
, fail
);
394 one
= tcg_constant_i64(0xff);
395 zero
= tcg_constant_i64(0);
396 tmp
= tcg_temp_new_i64();
397 tcg_gen_atomic_cmpxchg_i64(tmp
, hex_llsc_addr
, hex_llsc_val_i64
, src
,
398 ctx
->mem_idx
, MO_64
);
399 tcg_gen_movcond_i64(TCG_COND_EQ
, tmp
, tmp
, hex_llsc_val_i64
,
401 tcg_gen_extrl_i64_i32(pred
, tmp
);
402 tcg_temp_free_i64(tmp
);
406 tcg_gen_movi_tl(pred
, 0);
409 tcg_gen_movi_tl(hex_llsc_addr
, ~0);
412 void gen_store32(TCGv vaddr
, TCGv src
, int width
, uint32_t slot
)
414 tcg_gen_mov_tl(hex_store_addr
[slot
], vaddr
);
415 tcg_gen_movi_tl(hex_store_width
[slot
], width
);
416 tcg_gen_mov_tl(hex_store_val32
[slot
], src
);
419 void gen_store1(TCGv_env cpu_env
, TCGv vaddr
, TCGv src
, uint32_t slot
)
421 gen_store32(vaddr
, src
, 1, slot
);
424 void gen_store1i(TCGv_env cpu_env
, TCGv vaddr
, int32_t src
, uint32_t slot
)
426 TCGv tmp
= tcg_constant_tl(src
);
427 gen_store1(cpu_env
, vaddr
, tmp
, slot
);
430 void gen_store2(TCGv_env cpu_env
, TCGv vaddr
, TCGv src
, uint32_t slot
)
432 gen_store32(vaddr
, src
, 2, slot
);
435 void gen_store2i(TCGv_env cpu_env
, TCGv vaddr
, int32_t src
, uint32_t slot
)
437 TCGv tmp
= tcg_constant_tl(src
);
438 gen_store2(cpu_env
, vaddr
, tmp
, slot
);
441 void gen_store4(TCGv_env cpu_env
, TCGv vaddr
, TCGv src
, uint32_t slot
)
443 gen_store32(vaddr
, src
, 4, slot
);
446 void gen_store4i(TCGv_env cpu_env
, TCGv vaddr
, int32_t src
, uint32_t slot
)
448 TCGv tmp
= tcg_constant_tl(src
);
449 gen_store4(cpu_env
, vaddr
, tmp
, slot
);
452 void gen_store8(TCGv_env cpu_env
, TCGv vaddr
, TCGv_i64 src
, uint32_t slot
)
454 tcg_gen_mov_tl(hex_store_addr
[slot
], vaddr
);
455 tcg_gen_movi_tl(hex_store_width
[slot
], 8);
456 tcg_gen_mov_i64(hex_store_val64
[slot
], src
);
459 void gen_store8i(TCGv_env cpu_env
, TCGv vaddr
, int64_t src
, uint32_t slot
)
461 TCGv_i64 tmp
= tcg_constant_i64(src
);
462 gen_store8(cpu_env
, vaddr
, tmp
, slot
);
465 TCGv
gen_8bitsof(TCGv result
, TCGv value
)
467 TCGv zero
= tcg_constant_tl(0);
468 TCGv ones
= tcg_constant_tl(0xff);
469 tcg_gen_movcond_tl(TCG_COND_NE
, result
, value
, zero
, ones
, zero
);
474 static void gen_write_new_pc_addr(DisasContext
*ctx
, TCGv addr
,
475 TCGCond cond
, TCGv pred
)
477 TCGLabel
*pred_false
= NULL
;
478 if (cond
!= TCG_COND_ALWAYS
) {
479 pred_false
= gen_new_label();
480 tcg_gen_brcondi_tl(cond
, pred
, 0, pred_false
);
483 if (ctx
->pkt
->pkt_has_multi_cof
) {
484 /* If there are multiple branches in a packet, ignore the second one */
485 tcg_gen_movcond_tl(TCG_COND_NE
, hex_gpr
[HEX_REG_PC
],
486 hex_branch_taken
, tcg_constant_tl(0),
487 hex_gpr
[HEX_REG_PC
], addr
);
488 tcg_gen_movi_tl(hex_branch_taken
, 1);
490 tcg_gen_mov_tl(hex_gpr
[HEX_REG_PC
], addr
);
493 if (cond
!= TCG_COND_ALWAYS
) {
494 gen_set_label(pred_false
);
498 static void gen_write_new_pc_pcrel(DisasContext
*ctx
, int pc_off
,
499 TCGCond cond
, TCGv pred
)
501 target_ulong dest
= ctx
->pkt
->pc
+ pc_off
;
502 if (ctx
->pkt
->pkt_has_multi_cof
) {
503 gen_write_new_pc_addr(ctx
, tcg_constant_tl(dest
), cond
, pred
);
505 /* Defer this jump to the end of the TB */
506 ctx
->branch_cond
= TCG_COND_ALWAYS
;
508 ctx
->branch_cond
= cond
;
509 tcg_gen_mov_tl(hex_branch_taken
, pred
);
511 ctx
->branch_dest
= dest
;
515 void gen_set_usr_field(int field
, TCGv val
)
517 tcg_gen_deposit_tl(hex_new_value
[HEX_REG_USR
], hex_new_value
[HEX_REG_USR
],
519 reg_field_info
[field
].offset
,
520 reg_field_info
[field
].width
);
523 void gen_set_usr_fieldi(int field
, int x
)
525 if (reg_field_info
[field
].width
== 1) {
526 target_ulong bit
= 1 << reg_field_info
[field
].offset
;
528 tcg_gen_ori_tl(hex_new_value
[HEX_REG_USR
],
529 hex_new_value
[HEX_REG_USR
],
532 tcg_gen_andi_tl(hex_new_value
[HEX_REG_USR
],
533 hex_new_value
[HEX_REG_USR
],
537 TCGv val
= tcg_constant_tl(x
);
538 gen_set_usr_field(field
, val
);
542 static void gen_compare(TCGCond cond
, TCGv res
, TCGv arg1
, TCGv arg2
)
544 TCGv one
= tcg_constant_tl(0xff);
545 TCGv zero
= tcg_constant_tl(0);
547 tcg_gen_movcond_tl(cond
, res
, arg1
, arg2
, one
, zero
);
550 static void gen_cond_jumpr(DisasContext
*ctx
, TCGv dst_pc
,
551 TCGCond cond
, TCGv pred
)
553 gen_write_new_pc_addr(ctx
, dst_pc
, cond
, pred
);
556 static void gen_cond_jump(DisasContext
*ctx
, TCGCond cond
, TCGv pred
,
559 gen_write_new_pc_pcrel(ctx
, pc_off
, cond
, pred
);
562 static void gen_cmpnd_cmp_jmp(DisasContext
*ctx
,
563 int pnum
, TCGCond cond1
, TCGv arg1
, TCGv arg2
,
564 TCGCond cond2
, int pc_off
)
566 if (ctx
->insn
->part1
) {
567 TCGv pred
= tcg_temp_new();
568 gen_compare(cond1
, pred
, arg1
, arg2
);
569 gen_log_pred_write(ctx
, pnum
, pred
);
572 TCGv pred
= tcg_temp_new();
573 tcg_gen_mov_tl(pred
, hex_new_pred_value
[pnum
]);
574 gen_cond_jump(ctx
, cond2
, pred
, pc_off
);
579 static void gen_cmpnd_cmp_jmp_t(DisasContext
*ctx
,
580 int pnum
, TCGCond cond
, TCGv arg1
, TCGv arg2
,
583 gen_cmpnd_cmp_jmp(ctx
, pnum
, cond
, arg1
, arg2
, TCG_COND_EQ
, pc_off
);
586 static void gen_cmpnd_cmp_jmp_f(DisasContext
*ctx
,
587 int pnum
, TCGCond cond
, TCGv arg1
, TCGv arg2
,
590 gen_cmpnd_cmp_jmp(ctx
, pnum
, cond
, arg1
, arg2
, TCG_COND_NE
, pc_off
);
593 static void gen_cmpnd_cmpi_jmp_t(DisasContext
*ctx
,
594 int pnum
, TCGCond cond
, TCGv arg1
, int arg2
,
597 TCGv tmp
= tcg_constant_tl(arg2
);
598 gen_cmpnd_cmp_jmp(ctx
, pnum
, cond
, arg1
, tmp
, TCG_COND_EQ
, pc_off
);
601 static void gen_cmpnd_cmpi_jmp_f(DisasContext
*ctx
,
602 int pnum
, TCGCond cond
, TCGv arg1
, int arg2
,
605 TCGv tmp
= tcg_constant_tl(arg2
);
606 gen_cmpnd_cmp_jmp(ctx
, pnum
, cond
, arg1
, tmp
, TCG_COND_NE
, pc_off
);
609 static void gen_cmpnd_cmp_n1_jmp_t(DisasContext
*ctx
, int pnum
, TCGCond cond
,
610 TCGv arg
, int pc_off
)
612 gen_cmpnd_cmpi_jmp_t(ctx
, pnum
, cond
, arg
, -1, pc_off
);
615 static void gen_cmpnd_cmp_n1_jmp_f(DisasContext
*ctx
, int pnum
, TCGCond cond
,
616 TCGv arg
, int pc_off
)
618 gen_cmpnd_cmpi_jmp_f(ctx
, pnum
, cond
, arg
, -1, pc_off
);
621 static void gen_cmpnd_tstbit0_jmp(DisasContext
*ctx
,
622 int pnum
, TCGv arg
, TCGCond cond
, int pc_off
)
624 if (ctx
->insn
->part1
) {
625 TCGv pred
= tcg_temp_new();
626 tcg_gen_andi_tl(pred
, arg
, 1);
627 gen_8bitsof(pred
, pred
);
628 gen_log_pred_write(ctx
, pnum
, pred
);
631 TCGv pred
= tcg_temp_new();
632 tcg_gen_mov_tl(pred
, hex_new_pred_value
[pnum
]);
633 gen_cond_jump(ctx
, cond
, pred
, pc_off
);
638 static void gen_testbit0_jumpnv(DisasContext
*ctx
,
639 TCGv arg
, TCGCond cond
, int pc_off
)
641 TCGv pred
= tcg_temp_new();
642 tcg_gen_andi_tl(pred
, arg
, 1);
643 gen_cond_jump(ctx
, cond
, pred
, pc_off
);
647 static void gen_jump(DisasContext
*ctx
, int pc_off
)
649 gen_write_new_pc_pcrel(ctx
, pc_off
, TCG_COND_ALWAYS
, NULL
);
652 static void gen_jumpr(DisasContext
*ctx
, TCGv new_pc
)
654 gen_write_new_pc_addr(ctx
, new_pc
, TCG_COND_ALWAYS
, NULL
);
657 static void gen_call(DisasContext
*ctx
, int pc_off
)
660 tcg_constant_tl(ctx
->pkt
->pc
+ ctx
->pkt
->encod_pkt_size_in_bytes
);
661 gen_log_reg_write(HEX_REG_LR
, next_PC
);
662 gen_write_new_pc_pcrel(ctx
, pc_off
, TCG_COND_ALWAYS
, NULL
);
665 static void gen_cond_call(DisasContext
*ctx
, TCGv pred
,
666 TCGCond cond
, int pc_off
)
669 TCGv lsb
= tcg_temp_local_new();
670 TCGLabel
*skip
= gen_new_label();
671 tcg_gen_andi_tl(lsb
, pred
, 1);
672 gen_write_new_pc_pcrel(ctx
, pc_off
, cond
, lsb
);
673 tcg_gen_brcondi_tl(cond
, lsb
, 0, skip
);
676 tcg_constant_tl(ctx
->pkt
->pc
+ ctx
->pkt
->encod_pkt_size_in_bytes
);
677 gen_log_reg_write(HEX_REG_LR
, next_PC
);
681 static void gen_endloop0(DisasContext
*ctx
)
683 TCGv lpcfg
= tcg_temp_local_new();
685 GET_USR_FIELD(USR_LPCFG
, lpcfg
);
689 * hex_new_pred_value[3] = 0xff;
690 * hex_pred_written |= 1 << 3;
693 TCGLabel
*label1
= gen_new_label();
694 tcg_gen_brcondi_tl(TCG_COND_NE
, lpcfg
, 1, label1
);
696 tcg_gen_movi_tl(hex_new_pred_value
[3], 0xff);
697 tcg_gen_ori_tl(hex_pred_written
, hex_pred_written
, 1 << 3);
699 gen_set_label(label1
);
703 * SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
706 TCGLabel
*label2
= gen_new_label();
707 tcg_gen_brcondi_tl(TCG_COND_EQ
, lpcfg
, 0, label2
);
709 tcg_gen_subi_tl(lpcfg
, lpcfg
, 1);
710 SET_USR_FIELD(USR_LPCFG
, lpcfg
);
712 gen_set_label(label2
);
715 * If we're in a tight loop, we'll do this at the end of the TB to take
716 * advantage of direct block chaining.
718 if (!ctx
->is_tight_loop
) {
720 * if (hex_gpr[HEX_REG_LC0] > 1) {
721 * PC = hex_gpr[HEX_REG_SA0];
722 * hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
725 TCGLabel
*label3
= gen_new_label();
726 tcg_gen_brcondi_tl(TCG_COND_LEU
, hex_gpr
[HEX_REG_LC0
], 1, label3
);
728 gen_jumpr(ctx
, hex_gpr
[HEX_REG_SA0
]);
729 tcg_gen_subi_tl(hex_new_value
[HEX_REG_LC0
],
730 hex_gpr
[HEX_REG_LC0
], 1);
732 gen_set_label(label3
);
735 tcg_temp_free(lpcfg
);
738 static void gen_cmp_jumpnv(DisasContext
*ctx
,
739 TCGCond cond
, TCGv val
, TCGv src
, int pc_off
)
741 TCGv pred
= tcg_temp_new();
742 tcg_gen_setcond_tl(cond
, pred
, val
, src
);
743 gen_cond_jump(ctx
, TCG_COND_EQ
, pred
, pc_off
);
747 static void gen_cmpi_jumpnv(DisasContext
*ctx
,
748 TCGCond cond
, TCGv val
, int src
, int pc_off
)
750 TCGv pred
= tcg_temp_new();
751 tcg_gen_setcondi_tl(cond
, pred
, val
, src
);
752 gen_cond_jump(ctx
, TCG_COND_EQ
, pred
, pc_off
);
756 /* Shift left with saturation */
757 static void gen_shl_sat(TCGv dst
, TCGv src
, TCGv shift_amt
)
759 TCGv sh32
= tcg_temp_new();
760 TCGv dst_sar
= tcg_temp_new();
761 TCGv ovf
= tcg_temp_new();
762 TCGv satval
= tcg_temp_new();
763 TCGv min
= tcg_constant_tl(0x80000000);
764 TCGv max
= tcg_constant_tl(0x7fffffff);
767 * Possible values for shift_amt are 0 .. 64
768 * We need special handling for values above 31
771 * dst = sh32 == shift ? src : 0;
773 * dst_sar = dst >> sh32;
774 * satval = src < 0 ? min : max;
775 * if (dst_asr != src) {
781 tcg_gen_andi_tl(sh32
, shift_amt
, 31);
782 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, sh32
, shift_amt
,
783 src
, tcg_constant_tl(0));
784 tcg_gen_shl_tl(dst
, dst
, sh32
);
785 tcg_gen_sar_tl(dst_sar
, dst
, sh32
);
786 tcg_gen_movcond_tl(TCG_COND_LT
, satval
, src
, tcg_constant_tl(0), min
, max
);
788 tcg_gen_setcond_tl(TCG_COND_NE
, ovf
, dst_sar
, src
);
789 tcg_gen_shli_tl(ovf
, ovf
, reg_field_info
[USR_OVF
].offset
);
790 tcg_gen_or_tl(hex_new_value
[HEX_REG_USR
], hex_new_value
[HEX_REG_USR
], ovf
);
792 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, dst_sar
, src
, dst
, satval
);
795 tcg_temp_free(dst_sar
);
797 tcg_temp_free(satval
);
800 static void gen_sar(TCGv dst
, TCGv src
, TCGv shift_amt
)
803 * Shift arithmetic right
804 * Robust when shift_amt is >31 bits
806 TCGv tmp
= tcg_temp_new();
807 tcg_gen_umin_tl(tmp
, shift_amt
, tcg_constant_tl(31));
808 tcg_gen_sar_tl(dst
, src
, tmp
);
812 /* Bidirectional shift right with saturation */
813 static void gen_asr_r_r_sat(TCGv RdV
, TCGv RsV
, TCGv RtV
)
815 TCGv shift_amt
= tcg_temp_local_new();
816 TCGLabel
*positive
= gen_new_label();
817 TCGLabel
*done
= gen_new_label();
819 tcg_gen_sextract_i32(shift_amt
, RtV
, 0, 7);
820 tcg_gen_brcondi_tl(TCG_COND_GE
, shift_amt
, 0, positive
);
822 /* Negative shift amount => shift left */
823 tcg_gen_neg_tl(shift_amt
, shift_amt
);
824 gen_shl_sat(RdV
, RsV
, shift_amt
);
827 gen_set_label(positive
);
828 /* Positive shift amount => shift right */
829 gen_sar(RdV
, RsV
, shift_amt
);
833 tcg_temp_free(shift_amt
);
836 /* Bidirectional shift left with saturation */
837 static void gen_asl_r_r_sat(TCGv RdV
, TCGv RsV
, TCGv RtV
)
839 TCGv shift_amt
= tcg_temp_local_new();
840 TCGLabel
*positive
= gen_new_label();
841 TCGLabel
*done
= gen_new_label();
843 tcg_gen_sextract_i32(shift_amt
, RtV
, 0, 7);
844 tcg_gen_brcondi_tl(TCG_COND_GE
, shift_amt
, 0, positive
);
846 /* Negative shift amount => shift right */
847 tcg_gen_neg_tl(shift_amt
, shift_amt
);
848 gen_sar(RdV
, RsV
, shift_amt
);
851 gen_set_label(positive
);
852 /* Positive shift amount => shift left */
853 gen_shl_sat(RdV
, RsV
, shift_amt
);
857 tcg_temp_free(shift_amt
);
860 static intptr_t vreg_src_off(DisasContext
*ctx
, int num
)
862 intptr_t offset
= offsetof(CPUHexagonState
, VRegs
[num
]);
864 if (test_bit(num
, ctx
->vregs_select
)) {
865 offset
= ctx_future_vreg_off(ctx
, num
, 1, false);
867 if (test_bit(num
, ctx
->vregs_updated_tmp
)) {
868 offset
= ctx_tmp_vreg_off(ctx
, num
, 1, false);
873 static void gen_log_vreg_write(DisasContext
*ctx
, intptr_t srcoff
, int num
,
874 VRegWriteType type
, int slot_num
,
877 TCGLabel
*label_end
= NULL
;
881 TCGv cancelled
= tcg_temp_local_new();
882 label_end
= gen_new_label();
884 /* Don't do anything if the slot was cancelled */
885 tcg_gen_extract_tl(cancelled
, hex_slot_cancelled
, slot_num
, 1);
886 tcg_gen_brcondi_tl(TCG_COND_NE
, cancelled
, 0, label_end
);
887 tcg_temp_free(cancelled
);
890 if (type
!= EXT_TMP
) {
891 dstoff
= ctx_future_vreg_off(ctx
, num
, 1, true);
892 tcg_gen_gvec_mov(MO_64
, dstoff
, srcoff
,
893 sizeof(MMVector
), sizeof(MMVector
));
894 tcg_gen_ori_tl(hex_VRegs_updated
, hex_VRegs_updated
, 1 << num
);
896 dstoff
= ctx_tmp_vreg_off(ctx
, num
, 1, false);
897 tcg_gen_gvec_mov(MO_64
, dstoff
, srcoff
,
898 sizeof(MMVector
), sizeof(MMVector
));
902 gen_set_label(label_end
);
906 static void gen_log_vreg_write_pair(DisasContext
*ctx
, intptr_t srcoff
, int num
,
907 VRegWriteType type
, int slot_num
,
910 gen_log_vreg_write(ctx
, srcoff
, num
^ 0, type
, slot_num
, is_predicated
);
911 srcoff
+= sizeof(MMVector
);
912 gen_log_vreg_write(ctx
, srcoff
, num
^ 1, type
, slot_num
, is_predicated
);
915 static void gen_log_qreg_write(intptr_t srcoff
, int num
, int vnew
,
916 int slot_num
, bool is_predicated
)
918 TCGLabel
*label_end
= NULL
;
922 TCGv cancelled
= tcg_temp_local_new();
923 label_end
= gen_new_label();
925 /* Don't do anything if the slot was cancelled */
926 tcg_gen_extract_tl(cancelled
, hex_slot_cancelled
, slot_num
, 1);
927 tcg_gen_brcondi_tl(TCG_COND_NE
, cancelled
, 0, label_end
);
928 tcg_temp_free(cancelled
);
931 dstoff
= offsetof(CPUHexagonState
, future_QRegs
[num
]);
932 tcg_gen_gvec_mov(MO_64
, dstoff
, srcoff
, sizeof(MMQReg
), sizeof(MMQReg
));
935 tcg_gen_ori_tl(hex_QRegs_updated
, hex_QRegs_updated
, 1 << num
);
936 gen_set_label(label_end
);
940 static void gen_vreg_load(DisasContext
*ctx
, intptr_t dstoff
, TCGv src
,
943 TCGv_i64 tmp
= tcg_temp_new_i64();
945 tcg_gen_andi_tl(src
, src
, ~((int32_t)sizeof(MMVector
) - 1));
947 for (int i
= 0; i
< sizeof(MMVector
) / 8; i
++) {
948 tcg_gen_qemu_ld64(tmp
, src
, ctx
->mem_idx
);
949 tcg_gen_addi_tl(src
, src
, 8);
950 tcg_gen_st_i64(tmp
, cpu_env
, dstoff
+ i
* 8);
952 tcg_temp_free_i64(tmp
);
955 static void gen_vreg_store(DisasContext
*ctx
, TCGv EA
, intptr_t srcoff
,
956 int slot
, bool aligned
)
958 intptr_t dstoff
= offsetof(CPUHexagonState
, vstore
[slot
].data
);
959 intptr_t maskoff
= offsetof(CPUHexagonState
, vstore
[slot
].mask
);
961 if (is_gather_store_insn(ctx
)) {
962 TCGv sl
= tcg_constant_tl(slot
);
963 gen_helper_gather_store(cpu_env
, EA
, sl
);
967 tcg_gen_movi_tl(hex_vstore_pending
[slot
], 1);
969 tcg_gen_andi_tl(hex_vstore_addr
[slot
], EA
,
970 ~((int32_t)sizeof(MMVector
) - 1));
972 tcg_gen_mov_tl(hex_vstore_addr
[slot
], EA
);
974 tcg_gen_movi_tl(hex_vstore_size
[slot
], sizeof(MMVector
));
976 /* Copy the data to the vstore buffer */
977 tcg_gen_gvec_mov(MO_64
, dstoff
, srcoff
, sizeof(MMVector
), sizeof(MMVector
));
978 /* Set the mask to all 1's */
979 tcg_gen_gvec_dup_imm(MO_64
, maskoff
, sizeof(MMQReg
), sizeof(MMQReg
), ~0LL);
982 static void gen_vreg_masked_store(DisasContext
*ctx
, TCGv EA
, intptr_t srcoff
,
983 intptr_t bitsoff
, int slot
, bool invert
)
985 intptr_t dstoff
= offsetof(CPUHexagonState
, vstore
[slot
].data
);
986 intptr_t maskoff
= offsetof(CPUHexagonState
, vstore
[slot
].mask
);
988 tcg_gen_movi_tl(hex_vstore_pending
[slot
], 1);
989 tcg_gen_andi_tl(hex_vstore_addr
[slot
], EA
,
990 ~((int32_t)sizeof(MMVector
) - 1));
991 tcg_gen_movi_tl(hex_vstore_size
[slot
], sizeof(MMVector
));
993 /* Copy the data to the vstore buffer */
994 tcg_gen_gvec_mov(MO_64
, dstoff
, srcoff
, sizeof(MMVector
), sizeof(MMVector
));
996 tcg_gen_gvec_mov(MO_64
, maskoff
, bitsoff
, sizeof(MMQReg
), sizeof(MMQReg
));
998 tcg_gen_gvec_not(MO_64
, maskoff
, maskoff
,
999 sizeof(MMQReg
), sizeof(MMQReg
));
1003 static void vec_to_qvec(size_t size
, intptr_t dstoff
, intptr_t srcoff
)
1005 TCGv_i64 tmp
= tcg_temp_new_i64();
1006 TCGv_i64 word
= tcg_temp_new_i64();
1007 TCGv_i64 bits
= tcg_temp_new_i64();
1008 TCGv_i64 mask
= tcg_temp_new_i64();
1009 TCGv_i64 zero
= tcg_constant_i64(0);
1010 TCGv_i64 ones
= tcg_constant_i64(~0);
1012 for (int i
= 0; i
< sizeof(MMVector
) / 8; i
++) {
1013 tcg_gen_ld_i64(tmp
, cpu_env
, srcoff
+ i
* 8);
1014 tcg_gen_movi_i64(mask
, 0);
1016 for (int j
= 0; j
< 8; j
+= size
) {
1017 tcg_gen_extract_i64(word
, tmp
, j
* 8, size
* 8);
1018 tcg_gen_movcond_i64(TCG_COND_NE
, bits
, word
, zero
, ones
, zero
);
1019 tcg_gen_deposit_i64(mask
, mask
, bits
, j
, size
);
1022 tcg_gen_st8_i64(mask
, cpu_env
, dstoff
+ i
);
1024 tcg_temp_free_i64(tmp
);
1025 tcg_temp_free_i64(word
);
1026 tcg_temp_free_i64(bits
);
1027 tcg_temp_free_i64(mask
);
1030 void probe_noshuf_load(TCGv va
, int s
, int mi
)
1032 TCGv size
= tcg_constant_tl(s
);
1033 TCGv mem_idx
= tcg_constant_tl(mi
);
1034 gen_helper_probe_noshuf_load(cpu_env
, va
, size
, mem_idx
);
1038 * Note: Since this function might branch, `val` is
1039 * required to be a `tcg_temp_local`.
1041 void gen_set_usr_field_if(int field
, TCGv val
)
1043 /* Sets the USR field if `val` is non-zero */
1044 if (reg_field_info
[field
].width
== 1) {
1045 TCGv tmp
= tcg_temp_new();
1046 tcg_gen_extract_tl(tmp
, val
, 0, reg_field_info
[field
].width
);
1047 tcg_gen_shli_tl(tmp
, tmp
, reg_field_info
[field
].offset
);
1048 tcg_gen_or_tl(hex_new_value
[HEX_REG_USR
],
1049 hex_new_value
[HEX_REG_USR
],
1053 TCGLabel
*skip_label
= gen_new_label();
1054 tcg_gen_brcondi_tl(TCG_COND_EQ
, val
, 0, skip_label
);
1055 gen_set_usr_field(field
, val
);
1056 gen_set_label(skip_label
);
1060 void gen_sat_i32(TCGv dest
, TCGv source
, int width
)
1062 TCGv max_val
= tcg_constant_tl((1 << (width
- 1)) - 1);
1063 TCGv min_val
= tcg_constant_tl(-(1 << (width
- 1)));
1064 tcg_gen_smin_tl(dest
, source
, max_val
);
1065 tcg_gen_smax_tl(dest
, dest
, min_val
);
1068 void gen_sat_i32_ovfl(TCGv ovfl
, TCGv dest
, TCGv source
, int width
)
1070 gen_sat_i32(dest
, source
, width
);
1071 tcg_gen_setcond_tl(TCG_COND_NE
, ovfl
, source
, dest
);
1074 void gen_satu_i32(TCGv dest
, TCGv source
, int width
)
1076 TCGv max_val
= tcg_constant_tl((1 << width
) - 1);
1077 TCGv zero
= tcg_constant_tl(0);
1078 tcg_gen_movcond_tl(TCG_COND_GTU
, dest
, source
, max_val
, max_val
, source
);
1079 tcg_gen_movcond_tl(TCG_COND_LT
, dest
, source
, zero
, zero
, dest
);
1082 void gen_satu_i32_ovfl(TCGv ovfl
, TCGv dest
, TCGv source
, int width
)
1084 gen_satu_i32(dest
, source
, width
);
1085 tcg_gen_setcond_tl(TCG_COND_NE
, ovfl
, source
, dest
);
1088 void gen_sat_i64(TCGv_i64 dest
, TCGv_i64 source
, int width
)
1090 TCGv_i64 max_val
= tcg_constant_i64((1LL << (width
- 1)) - 1LL);
1091 TCGv_i64 min_val
= tcg_constant_i64(-(1LL << (width
- 1)));
1092 tcg_gen_smin_i64(dest
, source
, max_val
);
1093 tcg_gen_smax_i64(dest
, dest
, min_val
);
1096 void gen_sat_i64_ovfl(TCGv ovfl
, TCGv_i64 dest
, TCGv_i64 source
, int width
)
1099 gen_sat_i64(dest
, source
, width
);
1100 ovfl_64
= tcg_temp_new_i64();
1101 tcg_gen_setcond_i64(TCG_COND_NE
, ovfl_64
, dest
, source
);
1102 tcg_gen_trunc_i64_tl(ovfl
, ovfl_64
);
1103 tcg_temp_free_i64(ovfl_64
);
1106 void gen_satu_i64(TCGv_i64 dest
, TCGv_i64 source
, int width
)
1108 TCGv_i64 max_val
= tcg_constant_i64((1LL << width
) - 1LL);
1109 TCGv_i64 zero
= tcg_constant_i64(0);
1110 tcg_gen_movcond_i64(TCG_COND_GTU
, dest
, source
, max_val
, max_val
, source
);
1111 tcg_gen_movcond_i64(TCG_COND_LT
, dest
, source
, zero
, zero
, dest
);
1114 void gen_satu_i64_ovfl(TCGv ovfl
, TCGv_i64 dest
, TCGv_i64 source
, int width
)
1117 gen_satu_i64(dest
, source
, width
);
1118 ovfl_64
= tcg_temp_new_i64();
1119 tcg_gen_setcond_i64(TCG_COND_NE
, ovfl_64
, dest
, source
);
1120 tcg_gen_trunc_i64_tl(ovfl
, ovfl_64
);
1121 tcg_temp_free_i64(ovfl_64
);
1124 /* Implements the fADDSAT64 macro in TCG */
1125 void gen_add_sat_i64(TCGv_i64 ret
, TCGv_i64 a
, TCGv_i64 b
)
1127 TCGv_i64 sum
= tcg_temp_local_new_i64();
1128 TCGv_i64
xor = tcg_temp_new_i64();
1129 TCGv_i64 cond1
= tcg_temp_new_i64();
1130 TCGv_i64 cond2
= tcg_temp_local_new_i64();
1131 TCGv_i64 cond3
= tcg_temp_new_i64();
1132 TCGv_i64 mask
= tcg_constant_i64(0x8000000000000000ULL
);
1133 TCGv_i64 max_pos
= tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL
);
1134 TCGv_i64 max_neg
= tcg_constant_i64(0x8000000000000000LL
);
1135 TCGv_i64 zero
= tcg_constant_i64(0);
1136 TCGLabel
*no_ovfl_label
= gen_new_label();
1137 TCGLabel
*ovfl_label
= gen_new_label();
1138 TCGLabel
*ret_label
= gen_new_label();
1140 tcg_gen_add_i64(sum
, a
, b
);
1141 tcg_gen_xor_i64(xor, a
, b
);
1143 /* if (xor & mask) */
1144 tcg_gen_and_i64(cond1
, xor, mask
);
1145 tcg_temp_free_i64(xor);
1146 tcg_gen_brcondi_i64(TCG_COND_NE
, cond1
, 0, no_ovfl_label
);
1147 tcg_temp_free_i64(cond1
);
1149 /* else if ((a ^ sum) & mask) */
1150 tcg_gen_xor_i64(cond2
, a
, sum
);
1151 tcg_gen_and_i64(cond2
, cond2
, mask
);
1152 tcg_gen_brcondi_i64(TCG_COND_NE
, cond2
, 0, ovfl_label
);
1153 tcg_temp_free_i64(cond2
);
1154 /* fallthrough to no_ovfl_label branch */
1157 gen_set_label(no_ovfl_label
);
1158 tcg_gen_mov_i64(ret
, sum
);
1159 tcg_gen_br(ret_label
);
1161 /* else if branch */
1162 gen_set_label(ovfl_label
);
1163 tcg_gen_and_i64(cond3
, sum
, mask
);
1164 tcg_temp_free_i64(mask
);
1165 tcg_temp_free_i64(sum
);
1166 tcg_gen_movcond_i64(TCG_COND_NE
, ret
, cond3
, zero
, max_pos
, max_neg
);
1167 tcg_temp_free_i64(cond3
);
1168 SET_USR_FIELD(USR_OVF
, 1);
1170 gen_set_label(ret_label
);
1173 #include "tcg_funcs_generated.c.inc"
1174 #include "tcg_func_table_generated.c.inc"