target/hexagon: translation changes
/*
 * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define QEMU_GENERATE
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "internal.h"
#include "attribs.h"
#include "insn.h"
#include "decode.h"
#include "translate.h"
#include "printinsn.h"

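/*
 * TCG globals backed by fields of CPUHexagonState.  They are created in
 * hexagon_translate_init() at the bottom of this file, so generated code
 * can read and write the CPU state directly by name.
 */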
TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
TCGv hex_pred[NUM_PREGS];
TCGv hex_next_PC;
TCGv hex_this_PC;
TCGv hex_slot_cancelled;
TCGv hex_branch_taken;
TCGv hex_new_value[TOTAL_PER_THREAD_REGS];
#if HEX_DEBUG
TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
#endif
TCGv hex_new_pred_value[NUM_PREGS];
TCGv hex_pred_written;
TCGv hex_store_addr[STORES_MAX];
TCGv hex_store_width[STORES_MAX];
TCGv hex_store_val32[STORES_MAX];
TCGv_i64 hex_store_val64[STORES_MAX];
TCGv hex_pkt_has_store_s1;
TCGv hex_dczero_addr;
TCGv hex_llsc_addr;
TCGv hex_llsc_val;
TCGv_i64 hex_llsc_val_i64;

static const char * const hexagon_prednames[] = {
    "p0", "p1", "p2", "p3"
};

void gen_exception(int excp)
{
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

void gen_exception_debug(void)
{
    gen_exception(EXCP_DEBUG);
}

#if HEX_DEBUG
#define PACKET_BUFFER_LEN 1028
static void print_pkt(Packet *pkt)
{
    GString *buf = g_string_sized_new(PACKET_BUFFER_LEN);
    snprint_a_pkt_debug(buf, pkt);
    HEX_DEBUG_LOG("%s", buf->str);
    g_string_free(buf, true);
}
#define HEX_DEBUG_PRINT_PKT(pkt) print_pkt(pkt)
#else
#define HEX_DEBUG_PRINT_PKT(pkt) /* nothing */
#endif

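/*
 * Hexagon instructions are 32-bit words grouped into packets of up to
 * PACKET_WORDS_MAX words.  read_packet_words() fetches words until
 * is_packet_end() sees the parse bits that mark the last word of the
 * packet, or returns 0 if no end-of-packet marker is found in range.
 */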
static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
                             uint32_t words[])
{
    bool found_end = false;
    int nwords, max_words;

    memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
    for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
        words[nwords] =
            translator_ldl(env, ctx->base.pc_next + nwords * sizeof(uint32_t));
        found_end = is_packet_end(words[nwords]);
    }
    if (!found_end) {
        /* Read too many words without finding the end */
        return 0;
    }

    /* Check for page boundary crossing */
    max_words = -(ctx->base.pc_next | TARGET_PAGE_MASK) / sizeof(uint32_t);
    if (nwords > max_words) {
        /* We can only cross a page boundary at the beginning of a TB */
        g_assert(ctx->base.num_insns == 1);
    }

    HEX_DEBUG_LOG("decode_packet: pc = 0x%x\n", ctx->base.pc_next);
    HEX_DEBUG_LOG("    words = { ");
    for (int i = 0; i < nwords; i++) {
        HEX_DEBUG_LOG("0x%x, ", words[i]);
    }
    HEX_DEBUG_LOG("}\n");

    return nwords;
}

static bool check_for_attrib(Packet *pkt, int attrib)
{
    for (int i = 0; i < pkt->num_insns; i++) {
        if (GET_ATTRIB(pkt->insn[i].opcode, attrib)) {
            return true;
        }
    }
    return false;
}

static bool need_pc(Packet *pkt)
{
    return check_for_attrib(pkt, A_IMPLICIT_READS_PC);
}

static bool need_slot_cancelled(Packet *pkt)
{
    return check_for_attrib(pkt, A_CONDEXEC);
}

static bool need_pred_written(Packet *pkt)
{
    return check_for_attrib(pkt, A_WRITES_PRED_REG);
}

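/*
 * Per-packet runtime state (hex_slot_cancelled, hex_branch_taken,
 * hex_pred_written, ...) is only reset when the need_*() checks above say
 * an instruction in the packet can actually touch it, which keeps the
 * generated TCG code small for common packets.
 */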
static void gen_start_packet(DisasContext *ctx, Packet *pkt)
{
    target_ulong next_PC = ctx->base.pc_next + pkt->encod_pkt_size_in_bytes;
    int i;

    /* Clear out the disassembly context */
    ctx->reg_log_idx = 0;
    bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS);
    ctx->preg_log_idx = 0;
    for (i = 0; i < STORES_MAX; i++) {
        ctx->store_width[i] = 0;
    }
    tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1);
    ctx->s1_store_processed = 0;

#if HEX_DEBUG
    /* Handy place to set a breakpoint before the packet executes */
    gen_helper_debug_start_packet(cpu_env);
    tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next);
#endif

    /* Initialize the runtime state for packet semantics */
    if (need_pc(pkt)) {
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
    }
    if (need_slot_cancelled(pkt)) {
        tcg_gen_movi_tl(hex_slot_cancelled, 0);
    }
    if (pkt->pkt_has_cof) {
        tcg_gen_movi_tl(hex_branch_taken, 0);
        tcg_gen_movi_tl(hex_next_PC, next_PC);
    }
    if (need_pred_written(pkt)) {
        tcg_gen_movi_tl(hex_pred_written, 0);
    }
}

/*
 * The LOG_*_WRITE macros mark most of the writes in a packet.
 * However, there are some implicit writes marked as attributes
 * of the applicable instructions.
 */
static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn,
                                    int attrib, int rnum)
{
    if (GET_ATTRIB(insn->opcode, attrib)) {
        int is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC);
        if (is_predicated && !is_preloaded(ctx, rnum)) {
            tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]);
        }

        ctx_log_reg_write(ctx, rnum);
    }
}

static void mark_implicit_pred_write(DisasContext *ctx, Insn *insn,
                                     int attrib, int pnum)
{
    if (GET_ATTRIB(insn->opcode, attrib)) {
        ctx_log_pred_write(ctx, pnum);
    }
}

static void mark_implicit_writes(DisasContext *ctx, Insn *insn)
{
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_FP, HEX_REG_FP);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SP, HEX_REG_SP);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LR, HEX_REG_LR);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC0, HEX_REG_LC0);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1);

    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P0, 0);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P1, 1);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P2, 2);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P3, 3);
}

static void gen_insn(CPUHexagonState *env, DisasContext *ctx,
                     Insn *insn, Packet *pkt)
{
    if (insn->generate) {
        mark_implicit_writes(ctx, insn);
        insn->generate(env, ctx, insn, pkt);
    } else {
        gen_exception(HEX_EXCP_INVALID_OPCODE);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

/*
 * Helpers for generating the packet commit
 */
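/*
 * Hexagon packet semantics: instructions write to hex_new_value[] and
 * hex_new_pred_value[], and only the commit code below copies those into
 * the architectural state (hex_gpr[], hex_pred[]).  That is why a packet
 * such as { r0 = r1; r1 = r0 } swaps the two registers: both reads see
 * the values from before the packet.
 */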
static void gen_reg_writes(DisasContext *ctx)
{
    int i;

    for (i = 0; i < ctx->reg_log_idx; i++) {
        int reg_num = ctx->reg_log[i];

        tcg_gen_mov_tl(hex_gpr[reg_num], hex_new_value[reg_num]);
    }
}

static void gen_pred_writes(DisasContext *ctx, Packet *pkt)
{
    TCGv zero, control_reg, pval;
    int i;

    /* Early exit if the log is empty */
    if (!ctx->preg_log_idx) {
        return;
    }

    zero = tcg_const_tl(0);
    control_reg = tcg_temp_new();
    pval = tcg_temp_new();

    /*
     * Only endloop instructions will conditionally
     * write a predicate.  If there are no endloop
     * instructions, we can use the non-conditional
     * write of the predicates.
     */
    if (pkt->pkt_has_endloop) {
        TCGv pred_written = tcg_temp_new();
        for (i = 0; i < ctx->preg_log_idx; i++) {
            int pred_num = ctx->preg_log[i];

            tcg_gen_andi_tl(pred_written, hex_pred_written, 1 << pred_num);
            tcg_gen_movcond_tl(TCG_COND_NE, hex_pred[pred_num],
                               pred_written, zero,
                               hex_new_pred_value[pred_num],
                               hex_pred[pred_num]);
        }
        tcg_temp_free(pred_written);
    } else {
        for (i = 0; i < ctx->preg_log_idx; i++) {
            int pred_num = ctx->preg_log[i];
            tcg_gen_mov_tl(hex_pred[pred_num], hex_new_pred_value[pred_num]);
#if HEX_DEBUG
            /* Do this so HELPER(debug_commit_end) will know */
            tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pred_num);
#endif
        }
    }

    tcg_temp_free(zero);
    tcg_temp_free(control_reg);
    tcg_temp_free(pval);
}

static void gen_check_store_width(DisasContext *ctx, int slot_num)
{
#if HEX_DEBUG
    TCGv slot = tcg_const_tl(slot_num);
    TCGv check = tcg_const_tl(ctx->store_width[slot_num]);
    gen_helper_debug_check_store_width(cpu_env, slot, check);
    tcg_temp_free(slot);
    tcg_temp_free(check);
#endif
}

static bool slot_is_predicated(Packet *pkt, int slot_num)
{
    for (int i = 0; i < pkt->num_insns; i++) {
        if (pkt->insn[i].slot == slot_num) {
            return GET_ATTRIB(pkt->insn[i].opcode, A_CONDEXEC);
        }
    }
    /* If we get to here, we didn't find an instruction in the requested slot */
    g_assert_not_reached();
}

void process_store(DisasContext *ctx, Packet *pkt, int slot_num)
{
    bool is_predicated = slot_is_predicated(pkt, slot_num);
    TCGLabel *label_end = NULL;

    /*
     * We may have already processed this store
     * See CHECK_NOSHUF in macros.h
     */
    if (slot_num == 1 && ctx->s1_store_processed) {
        return;
    }
    ctx->s1_store_processed = 1;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }
    {
        TCGv address = tcg_temp_local_new();
        tcg_gen_mov_tl(address, hex_store_addr[slot_num]);

        /*
         * If we know the width from the DisasContext, we can
         * generate much cleaner code.
         * Unfortunately, not all instructions execute the fSTORE
         * macro during code generation.  Anything that uses the
         * generic helper will have this problem.  Instructions
         * that use fWRAP to generate proper TCG code will be OK.
         */
        switch (ctx->store_width[slot_num]) {
        case 1:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st8(hex_store_val32[slot_num],
                             hex_store_addr[slot_num],
                             ctx->mem_idx);
            break;
        case 2:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st16(hex_store_val32[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        case 4:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st32(hex_store_val32[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        case 8:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st64(hex_store_val64[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        default:
            {
                /*
                 * If we get to here, we don't know the width at
                 * TCG generation time, so we'll use a helper to
                 * avoid branching based on the width at runtime.
                 */
                TCGv slot = tcg_const_tl(slot_num);
                gen_helper_commit_store(cpu_env, slot);
                tcg_temp_free(slot);
            }
        }
        tcg_temp_free(address);
    }
    if (is_predicated) {
        gen_set_label(label_end);
    }
}

static void process_store_log(DisasContext *ctx, Packet *pkt)
{
    /*
     * When a packet has two stores, the hardware processes
     * slot 1 and then slot 0.  This will be important when
     * the memory accesses overlap.
     */
    if (pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa) {
        process_store(ctx, pkt, 1);
    }
    if (pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa) {
        process_store(ctx, pkt, 0);
    }
}

/* Zero out a 32-byte cache line */
static void process_dczeroa(DisasContext *ctx, Packet *pkt)
{
    if (pkt->pkt_has_dczeroa) {
        /* Store 32 bytes of zero starting at (addr & ~0x1f) */
        TCGv addr = tcg_temp_new();
        TCGv_i64 zero = tcg_const_i64(0);

        tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);

        tcg_temp_free(addr);
        tcg_temp_free_i64(zero);
    }
}

static void update_exec_counters(DisasContext *ctx, Packet *pkt)
{
    int num_insns = pkt->num_insns;
    int num_real_insns = 0;

    for (int i = 0; i < num_insns; i++) {
        if (!pkt->insn[i].is_endloop &&
            !pkt->insn[i].part1 &&
            !GET_ATTRIB(pkt->insn[i].opcode, A_IT_NOP)) {
            num_real_insns++;
        }
    }

    ctx->num_packets++;
    ctx->num_insns += num_real_insns;
}

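/*
 * The packet and instruction counts accumulated in the DisasContext are
 * added to the HEX_REG_QEMU_PKT_CNT and HEX_REG_QEMU_INSN_CNT pseudo
 * registers once per translation block, from hexagon_tr_tb_stop() below.
 */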
static void gen_exec_counters(DisasContext *ctx)
{
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_PKT_CNT],
                    hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets);
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT],
                    hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns);
}

static void gen_commit_packet(DisasContext *ctx, Packet *pkt)
{
    gen_reg_writes(ctx);
    gen_pred_writes(ctx, pkt);
    process_store_log(ctx, pkt);
    process_dczeroa(ctx, pkt);
    update_exec_counters(ctx, pkt);
#if HEX_DEBUG
    {
        TCGv has_st0 =
            tcg_const_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa);
        TCGv has_st1 =
            tcg_const_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa);

        /* Handy place to set a breakpoint at the end of execution */
        gen_helper_debug_commit_end(cpu_env, has_st0, has_st1);

        tcg_temp_free(has_st0);
        tcg_temp_free(has_st1);
    }
#endif

    if (pkt->pkt_has_cof) {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

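/*
 * Translate one packet: fetch the encoded words, decode them, let each
 * instruction's generate() callback emit TCG code, then emit the packet
 * commit.  An undecodable packet raises HEX_EXCP_INVALID_PACKET instead.
 */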
static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx)
{
    uint32_t words[PACKET_WORDS_MAX];
    int nwords;
    Packet pkt;
    int i;

    nwords = read_packet_words(env, ctx, words);
    if (!nwords) {
        gen_exception(HEX_EXCP_INVALID_PACKET);
        ctx->base.is_jmp = DISAS_NORETURN;
        return;
    }

    if (decode_packet(nwords, words, &pkt, false) > 0) {
        HEX_DEBUG_PRINT_PKT(&pkt);
        gen_start_packet(ctx, &pkt);
        for (i = 0; i < pkt.num_insns; i++) {
            gen_insn(env, ctx, &pkt.insn[i], &pkt);
        }
        gen_commit_packet(ctx, &pkt);
        ctx->base.pc_next += pkt.encod_pkt_size_in_bytes;
    } else {
        gen_exception(HEX_EXCP_INVALID_PACKET);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

static void hexagon_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->mem_idx = MMU_USER_IDX;
    ctx->num_packets = 0;
    ctx->num_insns = 0;
}

static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void hexagon_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool hexagon_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    ctx->base.pc_next += 4;
    return true;
}

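/*
 * Only the first packet of a TB is allowed to cross a page boundary
 * (see the assertion in read_packet_words()).  pkt_crosses_page()
 * re-scans the packet at ctx->base.pc_next so the translator can end
 * the TB before a later packet would spill into the next page.
 */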
static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
{
    target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    bool found_end = false;
    int nwords;

    for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
        uint32_t word = cpu_ldl_code(env,
                            ctx->base.pc_next + nwords * sizeof(uint32_t));
        found_end = is_packet_end(word);
    }
    uint32_t next_ptr = ctx->base.pc_next + nwords * sizeof(uint32_t);
    return found_end && next_ptr - page_start >= TARGET_PAGE_SIZE;
}

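/*
 * Decide whether to keep translating after this packet.  bytes_max is a
 * conservative bound on the size of the next packet; only when the next
 * packet might not fit in the page do we pay for the exact
 * pkt_crosses_page() check.
 */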
static void hexagon_tr_translate_packet(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHexagonState *env = cpu->env_ptr;

    decode_and_translate_packet(env, ctx);

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        target_ulong bytes_max = PACKET_WORDS_MAX * sizeof(target_ulong);

        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE ||
            (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - bytes_max &&
             pkt_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }

        /*
         * The CPU log is used to compare against LLDB single stepping,
         * so end the TB after every packet.
         */
        HexagonCPU *hex_cpu = container_of(env, HexagonCPU, env);
        if (hex_cpu->lldb_compat && qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

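/*
 * Two ways a TB normally ends here: DISAS_TOO_MANY means translation just
 * stopped, so the next PC is ctx->base.pc_next; DISAS_NORETURN means the
 * last packet contained a change of flow (or raised an exception), so the
 * destination is whatever hex_next_PC holds at runtime.
 */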
static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_exec_counters(ctx);
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    case DISAS_NORETURN:
        gen_exec_counters(ctx);
        tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void hexagon_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps hexagon_tr_ops = {
    .init_disas_context = hexagon_tr_init_disas_context,
    .tb_start           = hexagon_tr_tb_start,
    .insn_start         = hexagon_tr_insn_start,
    .breakpoint_check   = hexagon_tr_breakpoint_check,
    .translate_insn     = hexagon_tr_translate_packet,
    .tb_stop            = hexagon_tr_tb_stop,
    .disas_log          = hexagon_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
}

#define NAME_LEN 64
static char new_value_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
#if HEX_DEBUG
static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
#endif
static char new_pred_value_names[NUM_PREGS][NAME_LEN];
static char store_addr_names[STORES_MAX][NAME_LEN];
static char store_width_names[STORES_MAX][NAME_LEN];
static char store_val32_names[STORES_MAX][NAME_LEN];
static char store_val64_names[STORES_MAX][NAME_LEN];

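/*
 * Create the TCG globals declared at the top of this file.  Each one is
 * backed by a field of CPUHexagonState, and the names built in the static
 * buffers above are what show up in TCG opcode dumps.
 */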
void hexagon_translate_init(void)
{
    int i;

    opcode_init();

#if HEX_DEBUG
    if (!qemu_logfile) {
        qemu_set_log(qemu_loglevel);
    }
#endif

    for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
        hex_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, gpr[i]),
            hexagon_regnames[i]);

        snprintf(new_value_names[i], NAME_LEN, "new_%s", hexagon_regnames[i]);
        hex_new_value[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, new_value[i]),
            new_value_names[i]);

#if HEX_DEBUG
        snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s",
                 hexagon_regnames[i]);
        hex_reg_written[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, reg_written[i]),
            reg_written_names[i]);
#endif
    }
    for (i = 0; i < NUM_PREGS; i++) {
        hex_pred[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, pred[i]),
            hexagon_prednames[i]);

        snprintf(new_pred_value_names[i], NAME_LEN, "new_pred_%s",
                 hexagon_prednames[i]);
        hex_new_pred_value[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, new_pred_value[i]),
            new_pred_value_names[i]);
    }
    hex_pred_written = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, pred_written), "pred_written");
    hex_next_PC = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, next_PC), "next_PC");
    hex_this_PC = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, this_PC), "this_PC");
    hex_slot_cancelled = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, slot_cancelled), "slot_cancelled");
    hex_branch_taken = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, branch_taken), "branch_taken");
    hex_pkt_has_store_s1 = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, pkt_has_store_s1), "pkt_has_store_s1");
    hex_dczero_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, dczero_addr), "dczero_addr");
    hex_llsc_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, llsc_addr), "llsc_addr");
    hex_llsc_val = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, llsc_val), "llsc_val");
    hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64");
    for (i = 0; i < STORES_MAX; i++) {
        snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i);
        hex_store_addr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].va),
            store_addr_names[i]);

        snprintf(store_width_names[i], NAME_LEN, "store_width_%d", i);
        hex_store_width[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].width),
            store_width_names[i]);

        snprintf(store_val32_names[i], NAME_LEN, "store_val32_%d", i);
        hex_store_val32[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].data32),
            store_val32_names[i]);

        snprintf(store_val64_names[i], NAME_LEN, "store_val64_%d", i);
        hex_store_val64[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].data64),
            store_val64_names[i]);
    }
}