1 /*
2 * QEMU TILE-Gx CPU
3 *
4 * Copyright (c) 2015 Chen Gang
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see
18 * <http://www.gnu.org/licenses/lgpl-2.1.html>
19 */
20
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "opcode_tilegx.h"
27
28 #define FMT64X "%016" PRIx64
29
30 static TCGv_ptr cpu_env;
31 static TCGv cpu_pc;
32 static TCGv cpu_regs[TILEGX_R_COUNT];
33
34 static const char * const reg_names[64] = {
35 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
36 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
37 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
38 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
39 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
40 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
41 "r48", "r49", "r50", "r51", "bp", "tp", "sp", "lr",
42 "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn2", "zero"
43 };
44
45 /* Modified registers are cached in temporaries until the end of the bundle. */
46 typedef struct {
47 unsigned reg;
48 TCGv val;
49 } DisasContextTemp;
50
51 #define MAX_WRITEBACK 4
52
53 /* This is the state at translation time. */
54 typedef struct {
55 uint64_t pc; /* Current pc */
56
57 TCGv zero; /* For zero register */
58
59 DisasContextTemp wb[MAX_WRITEBACK];
60 int num_wb;
61 int mmuidx;
62 bool exit_tb;
63
64 struct {
65 TCGCond cond; /* branch condition */
66 TCGv dest; /* branch destination */
67 TCGv val1; /* value to be compared against zero, for cond */
68 } jmp; /* Pending jump; at most one per translation block */
69 } DisasContext;
70
71 #include "exec/gen-icount.h"
72
73 /* Differentiate the various pipe encodings. */
74 #define TY_X0 0
75 #define TY_X1 1
76 #define TY_Y0 2
77 #define TY_Y1 3
78
79 /* Remerge the base opcode and extension fields for switching.
80 The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
81 Y2 opcode field is 2 bits. */
82 #define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
83
84 /* Similar, but for Y2 only. */
85 #define OEY2(OP, MODE) (OP + MODE * 4)
86
87 /* Similar, but make sure opcode names match up. */
88 #define OE_RR_X0(E) OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
89 #define OE_RR_X1(E) OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
90 #define OE_RR_Y0(E) OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
91 #define OE_RR_Y1(E) OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
92 #define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
93 #define OE_IM(E,XY) OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
94 #define OE_SH(E,XY) OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
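/* For example, OE_RR_X1(JR) expands to
   OE(RRR_0_OPCODE_X1, JR_UNARY_OPCODE_X1, X1), i.e.
   TY_X1 + RRR_0_OPCODE_X1 * 4 + JR_UNARY_OPCODE_X1 * 64.  */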
95
96
97 static void gen_exception(DisasContext *dc, TileExcp num)
98 {
99 TCGv_i32 tmp;
100
101 tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
102
103 tmp = tcg_const_i32(num);
104 gen_helper_exception(cpu_env, tmp);
105 tcg_temp_free_i32(tmp);
106 dc->exit_tb = true;
107 }
108
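/* Return true if REG is backed by cpu_regs[].  Otherwise return false,
   raising the appropriate access exception first for the IDN/UDN
   registers; reads then use the zero register and writes are discarded.  */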
109 static bool check_gr(DisasContext *dc, uint8_t reg)
110 {
111 if (likely(reg < TILEGX_R_COUNT)) {
112 return true;
113 }
114
115 switch (reg) {
116 case TILEGX_R_SN:
117 case TILEGX_R_ZERO:
118 break;
119 case TILEGX_R_IDN0:
120 case TILEGX_R_IDN1:
121 gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
122 break;
123 case TILEGX_R_UDN0:
124 case TILEGX_R_UDN1:
125 case TILEGX_R_UDN2:
126 case TILEGX_R_UDN3:
127 gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
128 break;
129 default:
130 g_assert_not_reached();
131 }
132 return false;
133 }
134
135 static TCGv load_zero(DisasContext *dc)
136 {
137 if (TCGV_IS_UNUSED_I64(dc->zero)) {
138 dc->zero = tcg_const_i64(0);
139 }
140 return dc->zero;
141 }
142
143 static TCGv load_gr(DisasContext *dc, unsigned reg)
144 {
145 if (check_gr(dc, reg)) {
146 return cpu_regs[reg];
147 }
148 return load_zero(dc);
149 }
150
151 static TCGv dest_gr(DisasContext *dc, unsigned reg)
152 {
153 int n;
154
155 /* Raise any access exception now; a result written to an invalid register is simply discarded at commit time. */
156 check_gr(dc, reg);
157
158 n = dc->num_wb++;
159 dc->wb[n].reg = reg;
160 return dc->wb[n].val = tcg_temp_new_i64();
161 }
162
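/* Signed 32-bit saturating add/sub: operate on the sign-extended low 32
   bits of the sources, then clamp the result to [-0x80000000, 0x7fffffff].
   Used for the addxsc and subxsc instructions.  */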
163 static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
164 void (*operate)(TCGv, TCGv, TCGv))
165 {
166 TCGv t0 = tcg_temp_new();
167
168 tcg_gen_ext32s_tl(tdest, tsrca);
169 tcg_gen_ext32s_tl(t0, tsrcb);
170 operate(tdest, tdest, t0);
171
172 tcg_gen_movi_tl(t0, 0x7fffffff);
173 tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
174 tcg_gen_movi_tl(t0, -0x80000000LL);
175 tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);
176
177 tcg_temp_free(t0);
178 }
179
180 /* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
181 specified by the bottom 3 bits of TSRCB, and set TDEST to the
182 low 64 bits of the resulting value. */
183 static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
184 {
185 TCGv t0 = tcg_temp_new();
186
187 tcg_gen_andi_tl(t0, tsrcb, 7);
188 tcg_gen_shli_tl(t0, t0, 3);
189 tcg_gen_shr_tl(tdest, tsrcd, t0);
190
191 /* We want to do "t0 = tsrca << (64 - t0)". Two's complement
192 arithmetic on a 6-bit field tells us that 64 - t0 is equal
193 to (t0 ^ 63) + 1. So we can do the shift in two parts,
194 neither of which will be an invalid shift by 64. */
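    /* E.g. a byte shift of 3 gives t0 == 24 here; (24 ^ 63) == 39, and the
       extra shift-left by 1 below supplies the "+ 1", i.e. 64 - 24 = 40.  */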
195 tcg_gen_xori_tl(t0, t0, 63);
196 tcg_gen_shl_tl(t0, tsrca, t0);
197 tcg_gen_shli_tl(t0, t0, 1);
198 tcg_gen_or_tl(tdest, tdest, t0);
199
200 tcg_temp_free(t0);
201 }
202
203 /* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
204 right shift is an immediate. */
205 static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
206 {
207 TCGv t0 = tcg_temp_new();
208
209 tcg_gen_shri_tl(t0, tsrcb, shr);
210 tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
211 tcg_gen_or_tl(tdest, tdest, t0);
212
213 tcg_temp_free(t0);
214 }
215
216 static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
217 unsigned srcb, TCGMemOp memop, const char *name)
218 {
219 if (dest) {
220 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
221 }
222
223 tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
224 dc->mmuidx, memop);
225
226 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
227 reg_names[srca], reg_names[srcb]);
228 return TILEGX_EXCP_NONE;
229 }
230
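/* Store SRCB to the address in SRCA, then post-increment SRCA by IMM
   (the st*_add addressing forms).  */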
231 static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
232 int imm, TCGMemOp memop, const char *name)
233 {
234 TCGv tsrca = load_gr(dc, srca);
235 TCGv tsrcb = load_gr(dc, srcb);
236
237 tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
238 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
239
240 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
241 reg_names[srca], reg_names[srcb], imm);
242 return TILEGX_EXCP_NONE;
243 }
244
245 static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
246 unsigned dest, unsigned srca)
247 {
248 TCGv tdest, tsrca;
249 const char *mnemonic;
250 TCGMemOp memop;
251
252 /* Eliminate nops and jumps before doing anything else. */
253 switch (opext) {
254 case OE_RR_Y0(NOP):
255 case OE_RR_Y1(NOP):
256 case OE_RR_X0(NOP):
257 case OE_RR_X1(NOP):
258 mnemonic = "nop";
259 goto do_nop;
260 case OE_RR_Y0(FNOP):
261 case OE_RR_Y1(FNOP):
262 case OE_RR_X0(FNOP):
263 case OE_RR_X1(FNOP):
264 mnemonic = "fnop";
265 do_nop:
266 if (srca || dest) {
267 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
268 }
269 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
270 return TILEGX_EXCP_NONE;
271
272 case OE_RR_X1(JRP):
273 case OE_RR_Y1(JRP):
274 mnemonic = "jrp";
275 goto do_jr;
276 case OE_RR_X1(JR):
277 case OE_RR_Y1(JR):
278 mnemonic = "jr";
279 goto do_jr;
280 case OE_RR_X1(JALRP):
281 case OE_RR_Y1(JALRP):
282 mnemonic = "jalrp";
283 goto do_jalr;
284 case OE_RR_X1(JALR):
285 case OE_RR_Y1(JALR):
286 mnemonic = "jalr";
287 do_jalr:
288 tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
289 dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
290 do_jr:
291 dc->jmp.cond = TCG_COND_ALWAYS;
292 dc->jmp.dest = tcg_temp_new();
293 tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
294 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
295 return TILEGX_EXCP_NONE;
296 }
297
298 tdest = dest_gr(dc, dest);
299 tsrca = load_gr(dc, srca);
300
301 switch (opext) {
302 case OE_RR_X0(CNTLZ):
303 case OE_RR_Y0(CNTLZ):
304 gen_helper_cntlz(tdest, tsrca);
305 mnemonic = "cntlz";
306 break;
307 case OE_RR_X0(CNTTZ):
308 case OE_RR_Y0(CNTTZ):
309 gen_helper_cnttz(tdest, tsrca);
310 mnemonic = "cnttz";
311 break;
312 case OE_RR_X1(DRAIN):
313 case OE_RR_X1(DTLBPR):
314 case OE_RR_X1(FINV):
315 case OE_RR_X1(FLUSHWB):
316 case OE_RR_X1(FLUSH):
317 case OE_RR_X0(FSINGLE_PACK1):
318 case OE_RR_Y0(FSINGLE_PACK1):
319 case OE_RR_X1(ICOH):
320 case OE_RR_X1(ILL):
321 case OE_RR_Y1(ILL):
322 case OE_RR_X1(INV):
323 case OE_RR_X1(IRET):
324 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
325 case OE_RR_X1(LD1S):
326 memop = MO_SB;
327 mnemonic = "ld1s";
328 goto do_load;
329 case OE_RR_X1(LD1U):
330 memop = MO_UB;
331 mnemonic = "ld1u";
332 goto do_load;
333 case OE_RR_X1(LD2S):
334 memop = MO_TESW;
335 mnemonic = "ld2s";
336 goto do_load;
337 case OE_RR_X1(LD2U):
338 memop = MO_TEUW;
339 mnemonic = "ld2u";
340 goto do_load;
341 case OE_RR_X1(LD4S):
342 memop = MO_TESL;
343 mnemonic = "ld4s";
344 goto do_load;
345 case OE_RR_X1(LD4U):
346 memop = MO_TEUL;
347 mnemonic = "ld4u";
348 goto do_load;
349 case OE_RR_X1(LDNT1S):
350 memop = MO_SB;
351 mnemonic = "ldnt1s";
352 goto do_load;
353 case OE_RR_X1(LDNT1U):
354 memop = MO_UB;
355 mnemonic = "ldnt1u";
356 goto do_load;
357 case OE_RR_X1(LDNT2S):
358 memop = MO_TESW;
359 mnemonic = "ldnt2s";
360 goto do_load;
361 case OE_RR_X1(LDNT2U):
362 memop = MO_TEUW;
363 mnemonic = "ldnt2u";
364 goto do_load;
365 case OE_RR_X1(LDNT4S):
366 memop = MO_TESL;
367 mnemonic = "ldnt4s";
368 goto do_load;
369 case OE_RR_X1(LDNT4U):
370 memop = MO_TEUL;
371 mnemonic = "ldnt4u";
372 goto do_load;
373 case OE_RR_X1(LDNT):
374 memop = MO_TEQ;
375 mnemonic = "ldnt";
376 goto do_load;
377 case OE_RR_X1(LD):
378 memop = MO_TEQ;
379 mnemonic = "ld";
380 do_load:
381 tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
382 break;
383 case OE_RR_X1(LDNA):
384 tcg_gen_andi_tl(tdest, tsrca, ~7);
385 tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
386 mnemonic = "ldna";
387 break;
388 case OE_RR_X1(LNK):
389 case OE_RR_Y1(LNK):
390 if (srca) {
391 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
392 }
393 tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
394 mnemonic = "lnk";
395 break;
396 case OE_RR_X1(MF):
397 case OE_RR_X1(NAP):
398 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
399 case OE_RR_X0(PCNT):
400 case OE_RR_Y0(PCNT):
401 gen_helper_pcnt(tdest, tsrca);
402 mnemonic = "pcnt";
403 break;
404 case OE_RR_X0(REVBITS):
405 case OE_RR_Y0(REVBITS):
406 gen_helper_revbits(tdest, tsrca);
407 mnemonic = "revbits";
408 break;
409 case OE_RR_X0(REVBYTES):
410 case OE_RR_Y0(REVBYTES):
411 tcg_gen_bswap64_tl(tdest, tsrca);
412 mnemonic = "revbytes";
413 break;
414 case OE_RR_X1(SWINT0):
415 case OE_RR_X1(SWINT1):
416 case OE_RR_X1(SWINT2):
417 case OE_RR_X1(SWINT3):
418 case OE_RR_X0(TBLIDXB0):
419 case OE_RR_Y0(TBLIDXB0):
420 case OE_RR_X0(TBLIDXB1):
421 case OE_RR_Y0(TBLIDXB1):
422 case OE_RR_X0(TBLIDXB2):
423 case OE_RR_Y0(TBLIDXB2):
424 case OE_RR_X0(TBLIDXB3):
425 case OE_RR_Y0(TBLIDXB3):
426 case OE_RR_X1(WH64):
427 default:
428 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
429 }
430
431 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
432 reg_names[dest], reg_names[srca]);
433 return TILEGX_EXCP_NONE;
434 }
435
436 static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
437 unsigned dest, unsigned srca, unsigned srcb)
438 {
439 TCGv tdest = dest_gr(dc, dest);
440 TCGv tsrca = load_gr(dc, srca);
441 TCGv tsrcb = load_gr(dc, srcb);
442 const char *mnemonic;
443
444 switch (opext) {
445 case OE_RRR(ADDXSC, 0, X0):
446 case OE_RRR(ADDXSC, 0, X1):
447 gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
448 mnemonic = "addxsc";
449 break;
450 case OE_RRR(ADDX, 0, X0):
451 case OE_RRR(ADDX, 0, X1):
452 case OE_RRR(ADDX, 0, Y0):
453 case OE_RRR(ADDX, 0, Y1):
454 tcg_gen_add_tl(tdest, tsrca, tsrcb);
455 tcg_gen_ext32s_tl(tdest, tdest);
456 mnemonic = "addx";
457 break;
458 case OE_RRR(ADD, 0, X0):
459 case OE_RRR(ADD, 0, X1):
460 case OE_RRR(ADD, 0, Y0):
461 case OE_RRR(ADD, 0, Y1):
462 tcg_gen_add_tl(tdest, tsrca, tsrcb);
463 mnemonic = "add";
464 break;
465 case OE_RRR(AND, 0, X0):
466 case OE_RRR(AND, 0, X1):
467 case OE_RRR(AND, 5, Y0):
468 case OE_RRR(AND, 5, Y1):
469 tcg_gen_and_tl(tdest, tsrca, tsrcb);
470 mnemonic = "and";
471 break;
472 case OE_RRR(CMOVEQZ, 0, X0):
473 case OE_RRR(CMOVEQZ, 4, Y0):
474 case OE_RRR(CMOVNEZ, 0, X0):
475 case OE_RRR(CMOVNEZ, 4, Y0):
476 case OE_RRR(CMPEQ, 0, X0):
477 case OE_RRR(CMPEQ, 0, X1):
478 case OE_RRR(CMPEQ, 3, Y0):
479 case OE_RRR(CMPEQ, 3, Y1):
480 case OE_RRR(CMPEXCH4, 0, X1):
481 case OE_RRR(CMPEXCH, 0, X1):
482 case OE_RRR(CMPLES, 0, X0):
483 case OE_RRR(CMPLES, 0, X1):
484 case OE_RRR(CMPLES, 2, Y0):
485 case OE_RRR(CMPLES, 2, Y1):
486 case OE_RRR(CMPLEU, 0, X0):
487 case OE_RRR(CMPLEU, 0, X1):
488 case OE_RRR(CMPLEU, 2, Y0):
489 case OE_RRR(CMPLEU, 2, Y1):
490 case OE_RRR(CMPLTS, 0, X0):
491 case OE_RRR(CMPLTS, 0, X1):
492 case OE_RRR(CMPLTS, 2, Y0):
493 case OE_RRR(CMPLTS, 2, Y1):
494 case OE_RRR(CMPLTU, 0, X0):
495 case OE_RRR(CMPLTU, 0, X1):
496 case OE_RRR(CMPLTU, 2, Y0):
497 case OE_RRR(CMPLTU, 2, Y1):
498 case OE_RRR(CMPNE, 0, X0):
499 case OE_RRR(CMPNE, 0, X1):
500 case OE_RRR(CMPNE, 3, Y0):
501 case OE_RRR(CMPNE, 3, Y1):
502 case OE_RRR(CMULAF, 0, X0):
503 case OE_RRR(CMULA, 0, X0):
504 case OE_RRR(CMULFR, 0, X0):
505 case OE_RRR(CMULF, 0, X0):
506 case OE_RRR(CMULHR, 0, X0):
507 case OE_RRR(CMULH, 0, X0):
508 case OE_RRR(CMUL, 0, X0):
509 case OE_RRR(CRC32_32, 0, X0):
510 case OE_RRR(CRC32_8, 0, X0):
511 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
512 case OE_RRR(DBLALIGN2, 0, X0):
513 case OE_RRR(DBLALIGN2, 0, X1):
514 gen_dblaligni(tdest, tsrca, tsrcb, 16);
515 mnemonic = "dblalign2";
516 break;
517 case OE_RRR(DBLALIGN4, 0, X0):
518 case OE_RRR(DBLALIGN4, 0, X1):
519 gen_dblaligni(tdest, tsrca, tsrcb, 32);
520 mnemonic = "dblalign4";
521 break;
522 case OE_RRR(DBLALIGN6, 0, X0):
523 case OE_RRR(DBLALIGN6, 0, X1):
524 gen_dblaligni(tdest, tsrca, tsrcb, 48);
525 mnemonic = "dblalign6";
526 break;
527 case OE_RRR(DBLALIGN, 0, X0):
528 gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
529 mnemonic = "dblalign";
530 break;
531 case OE_RRR(EXCH4, 0, X1):
532 case OE_RRR(EXCH, 0, X1):
533 case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
534 case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
535 case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
536 case OE_RRR(FDOUBLE_PACK1, 0, X0):
537 case OE_RRR(FDOUBLE_PACK2, 0, X0):
538 case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
539 case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
540 case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
541 case OE_RRR(FETCHADD4, 0, X1):
542 case OE_RRR(FETCHADDGEZ4, 0, X1):
543 case OE_RRR(FETCHADDGEZ, 0, X1):
544 case OE_RRR(FETCHADD, 0, X1):
545 case OE_RRR(FETCHAND4, 0, X1):
546 case OE_RRR(FETCHAND, 0, X1):
547 case OE_RRR(FETCHOR4, 0, X1):
548 case OE_RRR(FETCHOR, 0, X1):
549 case OE_RRR(FSINGLE_ADD1, 0, X0):
550 case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
551 case OE_RRR(FSINGLE_MUL1, 0, X0):
552 case OE_RRR(FSINGLE_MUL2, 0, X0):
553 case OE_RRR(FSINGLE_PACK2, 0, X0):
554 case OE_RRR(FSINGLE_SUB1, 0, X0):
555 case OE_RRR(MNZ, 0, X0):
556 case OE_RRR(MNZ, 0, X1):
557 case OE_RRR(MNZ, 4, Y0):
558 case OE_RRR(MNZ, 4, Y1):
559 case OE_RRR(MULAX, 0, X0):
560 case OE_RRR(MULAX, 3, Y0):
561 case OE_RRR(MULA_HS_HS, 0, X0):
562 case OE_RRR(MULA_HS_HS, 9, Y0):
563 case OE_RRR(MULA_HS_HU, 0, X0):
564 case OE_RRR(MULA_HS_LS, 0, X0):
565 case OE_RRR(MULA_HS_LU, 0, X0):
566 case OE_RRR(MULA_HU_HU, 0, X0):
567 case OE_RRR(MULA_HU_HU, 9, Y0):
568 case OE_RRR(MULA_HU_LS, 0, X0):
569 case OE_RRR(MULA_HU_LU, 0, X0):
570 case OE_RRR(MULA_LS_LS, 0, X0):
571 case OE_RRR(MULA_LS_LS, 9, Y0):
572 case OE_RRR(MULA_LS_LU, 0, X0):
573 case OE_RRR(MULA_LU_LU, 0, X0):
574 case OE_RRR(MULA_LU_LU, 9, Y0):
575 case OE_RRR(MULX, 0, X0):
576 case OE_RRR(MULX, 3, Y0):
577 case OE_RRR(MUL_HS_HS, 0, X0):
578 case OE_RRR(MUL_HS_HS, 8, Y0):
579 case OE_RRR(MUL_HS_HU, 0, X0):
580 case OE_RRR(MUL_HS_LS, 0, X0):
581 case OE_RRR(MUL_HS_LU, 0, X0):
582 case OE_RRR(MUL_HU_HU, 0, X0):
583 case OE_RRR(MUL_HU_HU, 8, Y0):
584 case OE_RRR(MUL_HU_LS, 0, X0):
585 case OE_RRR(MUL_HU_LU, 0, X0):
586 case OE_RRR(MUL_LS_LS, 0, X0):
587 case OE_RRR(MUL_LS_LS, 8, Y0):
588 case OE_RRR(MUL_LS_LU, 0, X0):
589 case OE_RRR(MUL_LU_LU, 0, X0):
590 case OE_RRR(MUL_LU_LU, 8, Y0):
591 case OE_RRR(MZ, 0, X0):
592 case OE_RRR(MZ, 0, X1):
593 case OE_RRR(MZ, 4, Y0):
594 case OE_RRR(MZ, 4, Y1):
595 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
596 case OE_RRR(NOR, 0, X0):
597 case OE_RRR(NOR, 0, X1):
598 case OE_RRR(NOR, 5, Y0):
599 case OE_RRR(NOR, 5, Y1):
600 tcg_gen_nor_tl(tdest, tsrca, tsrcb);
601 mnemonic = "nor";
602 break;
603 case OE_RRR(OR, 0, X0):
604 case OE_RRR(OR, 0, X1):
605 case OE_RRR(OR, 5, Y0):
606 case OE_RRR(OR, 5, Y1):
607 tcg_gen_or_tl(tdest, tsrca, tsrcb);
608 mnemonic = "or";
609 break;
610 case OE_RRR(ROTL, 0, X0):
611 case OE_RRR(ROTL, 0, X1):
612 case OE_RRR(ROTL, 6, Y0):
613 case OE_RRR(ROTL, 6, Y1):
614 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
615 case OE_RRR(SHL1ADDX, 0, X0):
616 case OE_RRR(SHL1ADDX, 0, X1):
617 case OE_RRR(SHL1ADDX, 7, Y0):
618 case OE_RRR(SHL1ADDX, 7, Y1):
619 tcg_gen_shli_tl(tdest, tsrca, 1);
620 tcg_gen_add_tl(tdest, tdest, tsrcb);
621 tcg_gen_ext32s_tl(tdest, tdest);
622 mnemonic = "shl1addx";
623 break;
624 case OE_RRR(SHL1ADD, 0, X0):
625 case OE_RRR(SHL1ADD, 0, X1):
626 case OE_RRR(SHL1ADD, 1, Y0):
627 case OE_RRR(SHL1ADD, 1, Y1):
628 tcg_gen_shli_tl(tdest, tsrca, 1);
629 tcg_gen_add_tl(tdest, tdest, tsrcb);
630 mnemonic = "shl1add";
631 break;
632 case OE_RRR(SHL2ADDX, 0, X0):
633 case OE_RRR(SHL2ADDX, 0, X1):
634 case OE_RRR(SHL2ADDX, 7, Y0):
635 case OE_RRR(SHL2ADDX, 7, Y1):
636 tcg_gen_shli_tl(tdest, tsrca, 2);
637 tcg_gen_add_tl(tdest, tdest, tsrcb);
638 tcg_gen_ext32s_tl(tdest, tdest);
639 mnemonic = "shl2addx";
640 break;
641 case OE_RRR(SHL2ADD, 0, X0):
642 case OE_RRR(SHL2ADD, 0, X1):
643 case OE_RRR(SHL2ADD, 1, Y0):
644 case OE_RRR(SHL2ADD, 1, Y1):
645 tcg_gen_shli_tl(tdest, tsrca, 2);
646 tcg_gen_add_tl(tdest, tdest, tsrcb);
647 mnemonic = "shl2add";
648 break;
649 case OE_RRR(SHL3ADDX, 0, X0):
650 case OE_RRR(SHL3ADDX, 0, X1):
651 case OE_RRR(SHL3ADDX, 7, Y0):
652 case OE_RRR(SHL3ADDX, 7, Y1):
653 tcg_gen_shli_tl(tdest, tsrca, 3);
654 tcg_gen_add_tl(tdest, tdest, tsrcb);
655 tcg_gen_ext32s_tl(tdest, tdest);
656 mnemonic = "shl3addx";
657 break;
658 case OE_RRR(SHL3ADD, 0, X0):
659 case OE_RRR(SHL3ADD, 0, X1):
660 case OE_RRR(SHL3ADD, 1, Y0):
661 case OE_RRR(SHL3ADD, 1, Y1):
662 tcg_gen_shli_tl(tdest, tsrca, 3);
663 tcg_gen_add_tl(tdest, tdest, tsrcb);
664 mnemonic = "shl3add";
665 break;
666 case OE_RRR(SHLX, 0, X0):
667 case OE_RRR(SHLX, 0, X1):
668 case OE_RRR(SHL, 0, X0):
669 case OE_RRR(SHL, 0, X1):
670 case OE_RRR(SHL, 6, Y0):
671 case OE_RRR(SHL, 6, Y1):
672 case OE_RRR(SHRS, 0, X0):
673 case OE_RRR(SHRS, 0, X1):
674 case OE_RRR(SHRS, 6, Y0):
675 case OE_RRR(SHRS, 6, Y1):
676 case OE_RRR(SHRUX, 0, X0):
677 case OE_RRR(SHRUX, 0, X1):
678 case OE_RRR(SHRU, 0, X0):
679 case OE_RRR(SHRU, 0, X1):
680 case OE_RRR(SHRU, 6, Y0):
681 case OE_RRR(SHRU, 6, Y1):
682 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
683 case OE_RRR(SHUFFLEBYTES, 0, X0):
684 gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrcb);
685 mnemonic = "shufflebytes";
686 break;
687 case OE_RRR(SUBXSC, 0, X0):
688 case OE_RRR(SUBXSC, 0, X1):
689 gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
690 mnemonic = "subxsc";
691 break;
692 case OE_RRR(SUBX, 0, X0):
693 case OE_RRR(SUBX, 0, X1):
694 case OE_RRR(SUBX, 0, Y0):
695 case OE_RRR(SUBX, 0, Y1):
696 tcg_gen_sub_tl(tdest, tsrca, tsrcb);
697 tcg_gen_ext32s_tl(tdest, tdest);
698 mnemonic = "subx";
699 break;
700 case OE_RRR(SUB, 0, X0):
701 case OE_RRR(SUB, 0, X1):
702 case OE_RRR(SUB, 0, Y0):
703 case OE_RRR(SUB, 0, Y1):
704 tcg_gen_sub_tl(tdest, tsrca, tsrcb);
705 mnemonic = "sub";
706 break;
707 case OE_RRR(V1ADDUC, 0, X0):
708 case OE_RRR(V1ADDUC, 0, X1):
709 case OE_RRR(V1ADD, 0, X0):
710 case OE_RRR(V1ADD, 0, X1):
711 case OE_RRR(V1ADIFFU, 0, X0):
712 case OE_RRR(V1AVGU, 0, X0):
713 case OE_RRR(V1CMPEQ, 0, X0):
714 case OE_RRR(V1CMPEQ, 0, X1):
715 case OE_RRR(V1CMPLES, 0, X0):
716 case OE_RRR(V1CMPLES, 0, X1):
717 case OE_RRR(V1CMPLEU, 0, X0):
718 case OE_RRR(V1CMPLEU, 0, X1):
719 case OE_RRR(V1CMPLTS, 0, X0):
720 case OE_RRR(V1CMPLTS, 0, X1):
721 case OE_RRR(V1CMPLTU, 0, X0):
722 case OE_RRR(V1CMPLTU, 0, X1):
723 case OE_RRR(V1CMPNE, 0, X0):
724 case OE_RRR(V1CMPNE, 0, X1):
725 case OE_RRR(V1DDOTPUA, 0, X0):
726 case OE_RRR(V1DDOTPUSA, 0, X0):
727 case OE_RRR(V1DDOTPUS, 0, X0):
728 case OE_RRR(V1DDOTPU, 0, X0):
729 case OE_RRR(V1DOTPA, 0, X0):
730 case OE_RRR(V1DOTPUA, 0, X0):
731 case OE_RRR(V1DOTPUSA, 0, X0):
732 case OE_RRR(V1DOTPUS, 0, X0):
733 case OE_RRR(V1DOTPU, 0, X0):
734 case OE_RRR(V1DOTP, 0, X0):
735 case OE_RRR(V1INT_H, 0, X0):
736 case OE_RRR(V1INT_H, 0, X1):
737 case OE_RRR(V1INT_L, 0, X0):
738 case OE_RRR(V1INT_L, 0, X1):
739 case OE_RRR(V1MAXU, 0, X0):
740 case OE_RRR(V1MAXU, 0, X1):
741 case OE_RRR(V1MINU, 0, X0):
742 case OE_RRR(V1MINU, 0, X1):
743 case OE_RRR(V1MNZ, 0, X0):
744 case OE_RRR(V1MNZ, 0, X1):
745 case OE_RRR(V1MULTU, 0, X0):
746 case OE_RRR(V1MULUS, 0, X0):
747 case OE_RRR(V1MULU, 0, X0):
748 case OE_RRR(V1MZ, 0, X0):
749 case OE_RRR(V1MZ, 0, X1):
750 case OE_RRR(V1SADAU, 0, X0):
751 case OE_RRR(V1SADU, 0, X0):
752 case OE_RRR(V1SHL, 0, X0):
753 case OE_RRR(V1SHL, 0, X1):
754 case OE_RRR(V1SHRS, 0, X0):
755 case OE_RRR(V1SHRS, 0, X1):
756 case OE_RRR(V1SHRU, 0, X0):
757 case OE_RRR(V1SHRU, 0, X1):
758 case OE_RRR(V1SUBUC, 0, X0):
759 case OE_RRR(V1SUBUC, 0, X1):
760 case OE_RRR(V1SUB, 0, X0):
761 case OE_RRR(V1SUB, 0, X1):
762 case OE_RRR(V2ADDSC, 0, X0):
763 case OE_RRR(V2ADDSC, 0, X1):
764 case OE_RRR(V2ADD, 0, X0):
765 case OE_RRR(V2ADD, 0, X1):
766 case OE_RRR(V2ADIFFS, 0, X0):
767 case OE_RRR(V2AVGS, 0, X0):
768 case OE_RRR(V2CMPEQ, 0, X0):
769 case OE_RRR(V2CMPEQ, 0, X1):
770 case OE_RRR(V2CMPLES, 0, X0):
771 case OE_RRR(V2CMPLES, 0, X1):
772 case OE_RRR(V2CMPLEU, 0, X0):
773 case OE_RRR(V2CMPLEU, 0, X1):
774 case OE_RRR(V2CMPLTS, 0, X0):
775 case OE_RRR(V2CMPLTS, 0, X1):
776 case OE_RRR(V2CMPLTU, 0, X0):
777 case OE_RRR(V2CMPLTU, 0, X1):
778 case OE_RRR(V2CMPNE, 0, X0):
779 case OE_RRR(V2CMPNE, 0, X1):
780 case OE_RRR(V2DOTPA, 0, X0):
781 case OE_RRR(V2DOTP, 0, X0):
782 case OE_RRR(V2INT_H, 0, X0):
783 case OE_RRR(V2INT_H, 0, X1):
784 case OE_RRR(V2INT_L, 0, X0):
785 case OE_RRR(V2INT_L, 0, X1):
786 case OE_RRR(V2MAXS, 0, X0):
787 case OE_RRR(V2MAXS, 0, X1):
788 case OE_RRR(V2MINS, 0, X0):
789 case OE_RRR(V2MINS, 0, X1):
790 case OE_RRR(V2MNZ, 0, X0):
791 case OE_RRR(V2MNZ, 0, X1):
792 case OE_RRR(V2MULFSC, 0, X0):
793 case OE_RRR(V2MULS, 0, X0):
794 case OE_RRR(V2MULTS, 0, X0):
795 case OE_RRR(V2MZ, 0, X0):
796 case OE_RRR(V2MZ, 0, X1):
797 case OE_RRR(V2PACKH, 0, X0):
798 case OE_RRR(V2PACKH, 0, X1):
799 case OE_RRR(V2PACKL, 0, X0):
800 case OE_RRR(V2PACKL, 0, X1):
801 case OE_RRR(V2PACKUC, 0, X0):
802 case OE_RRR(V2PACKUC, 0, X1):
803 case OE_RRR(V2SADAS, 0, X0):
804 case OE_RRR(V2SADAU, 0, X0):
805 case OE_RRR(V2SADS, 0, X0):
806 case OE_RRR(V2SADU, 0, X0):
807 case OE_RRR(V2SHLSC, 0, X0):
808 case OE_RRR(V2SHLSC, 0, X1):
809 case OE_RRR(V2SHL, 0, X0):
810 case OE_RRR(V2SHL, 0, X1):
811 case OE_RRR(V2SHRS, 0, X0):
812 case OE_RRR(V2SHRS, 0, X1):
813 case OE_RRR(V2SHRU, 0, X0):
814 case OE_RRR(V2SHRU, 0, X1):
815 case OE_RRR(V2SUBSC, 0, X0):
816 case OE_RRR(V2SUBSC, 0, X1):
817 case OE_RRR(V2SUB, 0, X0):
818 case OE_RRR(V2SUB, 0, X1):
819 case OE_RRR(V4ADDSC, 0, X0):
820 case OE_RRR(V4ADDSC, 0, X1):
821 case OE_RRR(V4ADD, 0, X0):
822 case OE_RRR(V4ADD, 0, X1):
823 case OE_RRR(V4INT_H, 0, X0):
824 case OE_RRR(V4INT_H, 0, X1):
825 case OE_RRR(V4INT_L, 0, X0):
826 case OE_RRR(V4INT_L, 0, X1):
827 case OE_RRR(V4PACKSC, 0, X0):
828 case OE_RRR(V4PACKSC, 0, X1):
829 case OE_RRR(V4SHLSC, 0, X0):
830 case OE_RRR(V4SHLSC, 0, X1):
831 case OE_RRR(V4SHL, 0, X0):
832 case OE_RRR(V4SHL, 0, X1):
833 case OE_RRR(V4SHRS, 0, X0):
834 case OE_RRR(V4SHRS, 0, X1):
835 case OE_RRR(V4SHRU, 0, X0):
836 case OE_RRR(V4SHRU, 0, X1):
837 case OE_RRR(V4SUBSC, 0, X0):
838 case OE_RRR(V4SUBSC, 0, X1):
839 case OE_RRR(V4SUB, 0, X0):
840 case OE_RRR(V4SUB, 0, X1):
841 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
842 case OE_RRR(XOR, 0, X0):
843 case OE_RRR(XOR, 0, X1):
844 case OE_RRR(XOR, 5, Y0):
845 case OE_RRR(XOR, 5, Y1):
846 tcg_gen_xor_tl(tdest, tsrca, tsrcb);
847 mnemonic = "xor";
848 break;
849 default:
850 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
851 }
852
853 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
854 reg_names[dest], reg_names[srca], reg_names[srcb]);
855 return TILEGX_EXCP_NONE;
856 }
857
858 static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
859 unsigned dest, unsigned srca, int imm)
860 {
861 TCGv tdest = dest_gr(dc, dest);
862 TCGv tsrca = load_gr(dc, srca);
863 const char *mnemonic;
864 TCGMemOp memop;
865
866 switch (opext) {
867 case OE(ADDI_OPCODE_Y0, 0, Y0):
868 case OE(ADDI_OPCODE_Y1, 0, Y1):
869 case OE_IM(ADDI, X0):
870 case OE_IM(ADDI, X1):
871 tcg_gen_addi_tl(tdest, tsrca, imm);
872 mnemonic = "addi";
873 break;
874 case OE(ADDXI_OPCODE_Y0, 0, Y0):
875 case OE(ADDXI_OPCODE_Y1, 0, Y1):
876 case OE_IM(ADDXI, X0):
877 case OE_IM(ADDXI, X1):
878 tcg_gen_addi_tl(tdest, tsrca, imm);
879 tcg_gen_ext32s_tl(tdest, tdest);
880 mnemonic = "addxi";
881 break;
882 case OE(ANDI_OPCODE_Y0, 0, Y0):
883 case OE(ANDI_OPCODE_Y1, 0, Y1):
884 case OE_IM(ANDI, X0):
885 case OE_IM(ANDI, X1):
886 tcg_gen_andi_tl(tdest, tsrca, imm);
887 mnemonic = "andi";
888 break;
889 case OE_IM(CMPEQI, X0):
890 case OE_IM(CMPEQI, X1):
891 case OE_IM(CMPLTSI, X0):
892 case OE_IM(CMPLTSI, X1):
893 case OE_IM(CMPLTUI, X0):
894 case OE_IM(CMPLTUI, X1):
895 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
896 case OE_IM(LD1S_ADD, X1):
897 memop = MO_SB;
898 mnemonic = "ld1s_add";
899 goto do_load_add;
900 case OE_IM(LD1U_ADD, X1):
901 memop = MO_UB;
902 mnemonic = "ld1u_add";
903 goto do_load_add;
904 case OE_IM(LD2S_ADD, X1):
905 memop = MO_TESW;
906 mnemonic = "ld2s_add";
907 goto do_load_add;
908 case OE_IM(LD2U_ADD, X1):
909 memop = MO_TEUW;
910 mnemonic = "ld2u_add";
911 goto do_load_add;
912 case OE_IM(LD4S_ADD, X1):
913 memop = MO_TESL;
914 mnemonic = "ld4s_add";
915 goto do_load_add;
916 case OE_IM(LD4U_ADD, X1):
917 memop = MO_TEUL;
918 mnemonic = "ld4u_add";
919 goto do_load_add;
920 case OE_IM(LDNT1S_ADD, X1):
921 memop = MO_SB;
922 mnemonic = "ldnt1s_add";
923 goto do_load_add;
924 case OE_IM(LDNT1U_ADD, X1):
925 memop = MO_UB;
926 mnemonic = "ldnt1u_add";
927 goto do_load_add;
928 case OE_IM(LDNT2S_ADD, X1):
929 memop = MO_TESW;
930 mnemonic = "ldnt2s_add";
931 goto do_load_add;
932 case OE_IM(LDNT2U_ADD, X1):
933 memop = MO_TEUW;
934 mnemonic = "ldnt2u_add";
935 goto do_load_add;
936 case OE_IM(LDNT4S_ADD, X1):
937 memop = MO_TESL;
938 mnemonic = "ldnt4s_add";
939 goto do_load_add;
940 case OE_IM(LDNT4U_ADD, X1):
941 memop = MO_TEUL;
942 mnemonic = "ldnt4u_add";
943 goto do_load_add;
944 case OE_IM(LDNT_ADD, X1):
945 memop = MO_TEQ;
946 mnemonic = "ldnt_add";
947 goto do_load_add;
948 case OE_IM(LD_ADD, X1):
949 memop = MO_TEQ;
950 mnemonic = "ldnt_add";
951 do_load_add:
952 tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
953 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
954 break;
955 case OE_IM(LDNA_ADD, X1):
956 tcg_gen_andi_tl(tdest, tsrca, ~7);
957 tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
958 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
959 mnemonic = "ldna_add";
960 break;
961 case OE_IM(MFSPR, X1):
962 case OE_IM(MTSPR, X1):
963 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
964 case OE_IM(ORI, X0):
965 case OE_IM(ORI, X1):
966 tcg_gen_ori_tl(tdest, tsrca, imm);
967 mnemonic = "ori";
968 break;
969 case OE_IM(V1ADDI, X0):
970 case OE_IM(V1ADDI, X1):
971 case OE_IM(V1CMPEQI, X0):
972 case OE_IM(V1CMPEQI, X1):
973 case OE_IM(V1CMPLTSI, X0):
974 case OE_IM(V1CMPLTSI, X1):
975 case OE_IM(V1CMPLTUI, X0):
976 case OE_IM(V1CMPLTUI, X1):
977 case OE_IM(V1MAXUI, X0):
978 case OE_IM(V1MAXUI, X1):
979 case OE_IM(V1MINUI, X0):
980 case OE_IM(V1MINUI, X1):
981 case OE_IM(V2ADDI, X0):
982 case OE_IM(V2ADDI, X1):
983 case OE_IM(V2CMPEQI, X0):
984 case OE_IM(V2CMPEQI, X1):
985 case OE_IM(V2CMPLTSI, X0):
986 case OE_IM(V2CMPLTSI, X1):
987 case OE_IM(V2CMPLTUI, X0):
988 case OE_IM(V2CMPLTUI, X1):
989 case OE_IM(V2MAXSI, X0):
990 case OE_IM(V2MAXSI, X1):
991 case OE_IM(V2MINSI, X0):
992 case OE_IM(V2MINSI, X1):
993 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
994 case OE_IM(XORI, X0):
995 case OE_IM(XORI, X1):
996 tcg_gen_xori_tl(tdest, tsrca, imm);
997 mnemonic = "xori";
998 break;
999
1000 case OE_SH(ROTLI, X0):
1001 case OE_SH(ROTLI, X1):
1002 case OE_SH(ROTLI, Y0):
1003 case OE_SH(ROTLI, Y1):
1004 case OE_SH(SHLI, X0):
1005 case OE_SH(SHLI, X1):
1006 case OE_SH(SHLI, Y0):
1007 case OE_SH(SHLI, Y1):
1008 case OE_SH(SHLXI, X0):
1009 case OE_SH(SHLXI, X1):
1010 case OE_SH(SHRSI, X0):
1011 case OE_SH(SHRSI, X1):
1012 case OE_SH(SHRSI, Y0):
1013 case OE_SH(SHRSI, Y1):
1014 case OE_SH(SHRUI, X0):
1015 case OE_SH(SHRUI, X1):
1016 case OE_SH(SHRUI, Y0):
1017 case OE_SH(SHRUI, Y1):
1018 case OE_SH(SHRUXI, X0):
1019 case OE_SH(SHRUXI, X1):
1020 case OE_SH(V1SHLI, X0):
1021 case OE_SH(V1SHLI, X1):
1022 case OE_SH(V1SHRSI, X0):
1023 case OE_SH(V1SHRSI, X1):
1024 case OE_SH(V1SHRUI, X0):
1025 case OE_SH(V1SHRUI, X1):
1026 case OE_SH(V2SHLI, X0):
1027 case OE_SH(V2SHLI, X1):
1028 case OE_SH(V2SHRSI, X0):
1029 case OE_SH(V2SHRSI, X1):
1030 case OE_SH(V2SHRUI, X0):
1031 case OE_SH(V2SHRUI, X1):
1032 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1033
1034 case OE(ADDLI_OPCODE_X0, 0, X0):
1035 case OE(ADDLI_OPCODE_X1, 0, X1):
1036 tcg_gen_addi_tl(tdest, tsrca, imm);
1037 mnemonic = "addli";
1038 break;
1039 case OE(ADDXLI_OPCODE_X0, 0, X0):
1040 case OE(ADDXLI_OPCODE_X1, 0, X1):
1041 tcg_gen_addi_tl(tdest, tsrca, imm);
1042 tcg_gen_ext32s_tl(tdest, tdest);
1043 mnemonic = "addxli";
1044 break;
1045 case OE(CMPEQI_OPCODE_Y0, 0, Y0):
1046 case OE(CMPEQI_OPCODE_Y1, 0, Y1):
1047 case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
1048 case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
1049 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1050 case OE(SHL16INSLI_OPCODE_X0, 0, X0):
1051 case OE(SHL16INSLI_OPCODE_X1, 0, X1):
1052 tcg_gen_shli_tl(tdest, tsrca, 16);
1053 tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
1054 mnemonic = "shl16insli";
1055 break;
1056
1057 default:
1058 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1059 }
1060
1061 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
1062 reg_names[dest], reg_names[srca], imm);
1063 return TILEGX_EXCP_NONE;
1064 }
1065
1066 static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
1067 unsigned dest, unsigned srca,
1068 unsigned bfs, unsigned bfe)
1069 {
1070 const char *mnemonic;
1071
1072 switch (ext) {
1073 case BFEXTU_BF_OPCODE_X0:
1074 case BFEXTS_BF_OPCODE_X0:
1075 case BFINS_BF_OPCODE_X0:
1076 case MM_BF_OPCODE_X0:
1077 default:
1078 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1079 }
1080
1081 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
1082 reg_names[dest], reg_names[srca], bfs, bfe);
1083 return TILEGX_EXCP_NONE;
1084 }
1085
1086 static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
1087 unsigned srca, int off)
1088 {
1089 target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
1090 const char *mnemonic;
1091
1092 switch (ext) {
1093 case BEQZT_BRANCH_OPCODE_X1:
1094 case BEQZ_BRANCH_OPCODE_X1:
1095 case BNEZT_BRANCH_OPCODE_X1:
1096 case BNEZ_BRANCH_OPCODE_X1:
1097 case BLBC_BRANCH_OPCODE_X1:
1098 case BGEZT_BRANCH_OPCODE_X1:
1099 case BGEZ_BRANCH_OPCODE_X1:
1100 case BGTZT_BRANCH_OPCODE_X1:
1101 case BGTZ_BRANCH_OPCODE_X1:
1102 case BLBCT_BRANCH_OPCODE_X1:
1103 case BLBST_BRANCH_OPCODE_X1:
1104 case BLBS_BRANCH_OPCODE_X1:
1105 case BLEZT_BRANCH_OPCODE_X1:
1106 case BLEZ_BRANCH_OPCODE_X1:
1107 case BLTZT_BRANCH_OPCODE_X1:
1108 case BLTZ_BRANCH_OPCODE_X1:
1109 default:
1110 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1111 }
1112
1113 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1114 qemu_log("%s %s, " TARGET_FMT_lx " <%s>",
1115 mnemonic, reg_names[srca], tgt, lookup_symbol(tgt));
1116 }
1117 return TILEGX_EXCP_NONE;
1118 }
1119
1120 static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
1121 {
1122 target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
1123 const char *mnemonic = "j";
1124
1125 /* The extension field is 1 bit, therefore we only have JAL and J. */
1126 if (ext == JAL_JUMP_OPCODE_X1) {
1127 tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
1128 dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
1129 mnemonic = "jal";
1130 }
1131 dc->jmp.cond = TCG_COND_ALWAYS;
1132 dc->jmp.dest = tcg_const_tl(tgt);
1133
1134 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1135 qemu_log("%s " TARGET_FMT_lx " <%s>",
1136 mnemonic, tgt, lookup_symbol(tgt));
1137 }
1138 return TILEGX_EXCP_NONE;
1139 }
1140
1141 static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
1142 {
1143 unsigned opc = get_Opcode_Y0(bundle);
1144 unsigned ext = get_RRROpcodeExtension_Y0(bundle);
1145 unsigned dest = get_Dest_Y0(bundle);
1146 unsigned srca = get_SrcA_Y0(bundle);
1147 unsigned srcb;
1148 int imm;
1149
1150 switch (opc) {
1151 case RRR_1_OPCODE_Y0:
1152 if (ext == UNARY_RRR_1_OPCODE_Y0) {
1153 ext = get_UnaryOpcodeExtension_Y0(bundle);
1154 return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca);
1155 }
1156 /* fallthru */
1157 case RRR_0_OPCODE_Y0:
1158 case RRR_2_OPCODE_Y0:
1159 case RRR_3_OPCODE_Y0:
1160 case RRR_4_OPCODE_Y0:
1161 case RRR_5_OPCODE_Y0:
1162 case RRR_6_OPCODE_Y0:
1163 case RRR_7_OPCODE_Y0:
1164 case RRR_8_OPCODE_Y0:
1165 case RRR_9_OPCODE_Y0:
1166 srcb = get_SrcB_Y0(bundle);
1167 return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);
1168
1169 case SHIFT_OPCODE_Y0:
1170 ext = get_ShiftOpcodeExtension_Y0(bundle);
1171 imm = get_ShAmt_Y0(bundle);
1172 return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);
1173
1174 case ADDI_OPCODE_Y0:
1175 case ADDXI_OPCODE_Y0:
1176 case ANDI_OPCODE_Y0:
1177 case CMPEQI_OPCODE_Y0:
1178 case CMPLTSI_OPCODE_Y0:
1179 imm = (int8_t)get_Imm8_Y0(bundle);
1180 return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);
1181
1182 default:
1183 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1184 }
1185 }
1186
1187 static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
1188 {
1189 unsigned opc = get_Opcode_Y1(bundle);
1190 unsigned ext = get_RRROpcodeExtension_Y1(bundle);
1191 unsigned dest = get_Dest_Y1(bundle);
1192 unsigned srca = get_SrcA_Y1(bundle);
1193 unsigned srcb;
1194 int imm;
1195
1196 switch (opc) {
1197 case RRR_1_OPCODE_Y1:
1198 if (ext == UNARY_RRR_1_OPCODE_Y1) {
1199 ext = get_UnaryOpcodeExtension_Y1(bundle);
1200 return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca);
1201 }
1202 /* fallthru */
1203 case RRR_0_OPCODE_Y1:
1204 case RRR_2_OPCODE_Y1:
1205 case RRR_3_OPCODE_Y1:
1206 case RRR_4_OPCODE_Y1:
1207 case RRR_5_OPCODE_Y1:
1208 case RRR_6_OPCODE_Y1:
1209 case RRR_7_OPCODE_Y1:
1210 srcb = get_SrcB_Y1(bundle);
1211 return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);
1212
1213 case SHIFT_OPCODE_Y1:
1214 ext = get_ShiftOpcodeExtension_Y1(bundle);
1215 imm = get_ShAmt_Y1(bundle);
1216 return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);
1217
1218 case ADDI_OPCODE_Y1:
1219 case ADDXI_OPCODE_Y1:
1220 case ANDI_OPCODE_Y1:
1221 case CMPEQI_OPCODE_Y1:
1222 case CMPLTSI_OPCODE_Y1:
1223 imm = (int8_t)get_Imm8_Y1(bundle);
1224 return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);
1225
1226 default:
1227 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1228 }
1229 }
1230
1231 static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
1232 {
1233 unsigned mode = get_Mode(bundle);
1234 unsigned opc = get_Opcode_Y2(bundle);
1235 unsigned srca = get_SrcA_Y2(bundle);
1236 unsigned srcbdest = get_SrcBDest_Y2(bundle);
1237 const char *mnemonic;
1238 TCGMemOp memop;
1239
1240 switch (OEY2(opc, mode)) {
1241 case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
1242 memop = MO_SB;
1243 mnemonic = "ld1s";
1244 goto do_load;
1245 case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
1246 memop = MO_UB;
1247 mnemonic = "ld1u";
1248 goto do_load;
1249 case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
1250 memop = MO_TESW;
1251 mnemonic = "ld2s";
1252 goto do_load;
1253 case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
1254 memop = MO_TEUW;
1255 mnemonic = "ld2u";
1256 goto do_load;
1257 case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
1258 memop = MO_TESL;
1259 mnemonic = "ld4s";
1260 goto do_load;
1261 case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
1262 memop = MO_TEUL;
1263 mnemonic = "ld4u";
1264 goto do_load;
1265 case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
1266 memop = MO_TEQ;
1267 mnemonic = "ld";
1268 do_load:
1269 tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
1270 dc->mmuidx, memop);
1271 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
1272 reg_names[srcbdest], reg_names[srca]);
1273 return TILEGX_EXCP_NONE;
1274
1275 case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
1276 return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
1277 case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
1278 return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
1279 case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
1280 return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
1281 case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
1282 return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");
1283
1284 default:
1285 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1286 }
1287 }
1288
1289 static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
1290 {
1291 unsigned opc = get_Opcode_X0(bundle);
1292 unsigned dest = get_Dest_X0(bundle);
1293 unsigned srca = get_SrcA_X0(bundle);
1294 unsigned ext, srcb, bfs, bfe;
1295 int imm;
1296
1297 switch (opc) {
1298 case RRR_0_OPCODE_X0:
1299 ext = get_RRROpcodeExtension_X0(bundle);
1300 if (ext == UNARY_RRR_0_OPCODE_X0) {
1301 ext = get_UnaryOpcodeExtension_X0(bundle);
1302 return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca);
1303 }
1304 srcb = get_SrcB_X0(bundle);
1305 return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);
1306
1307 case SHIFT_OPCODE_X0:
1308 ext = get_ShiftOpcodeExtension_X0(bundle);
1309 imm = get_ShAmt_X0(bundle);
1310 return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
1311
1312 case IMM8_OPCODE_X0:
1313 ext = get_Imm8OpcodeExtension_X0(bundle);
1314 imm = (int8_t)get_Imm8_X0(bundle);
1315 return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
1316
1317 case BF_OPCODE_X0:
1318 ext = get_BFOpcodeExtension_X0(bundle);
1319 bfs = get_BFStart_X0(bundle);
1320 bfe = get_BFEnd_X0(bundle);
1321 return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);
1322
1323 case ADDLI_OPCODE_X0:
1324 case SHL16INSLI_OPCODE_X0:
1325 case ADDXLI_OPCODE_X0:
1326 imm = (int16_t)get_Imm16_X0(bundle);
1327 return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);
1328
1329 default:
1330 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1331 }
1332 }
1333
1334 static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
1335 {
1336 unsigned opc = get_Opcode_X1(bundle);
1337 unsigned dest = get_Dest_X1(bundle);
1338 unsigned srca = get_SrcA_X1(bundle);
1339 unsigned ext, srcb;
1340 int imm;
1341
1342 switch (opc) {
1343 case RRR_0_OPCODE_X1:
1344 ext = get_RRROpcodeExtension_X1(bundle);
1345 srcb = get_SrcB_X1(bundle);
1346 switch (ext) {
1347 case UNARY_RRR_0_OPCODE_X1:
1348 ext = get_UnaryOpcodeExtension_X1(bundle);
1349 return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca);
1350 case ST1_RRR_0_OPCODE_X1:
1351 return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
1352 case ST2_RRR_0_OPCODE_X1:
1353 return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
1354 case ST4_RRR_0_OPCODE_X1:
1355 return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
1356 case STNT1_RRR_0_OPCODE_X1:
1357 return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
1358 case STNT2_RRR_0_OPCODE_X1:
1359 return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
1360 case STNT4_RRR_0_OPCODE_X1:
1361 return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
1362 case STNT_RRR_0_OPCODE_X1:
1363 return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
1364 case ST_RRR_0_OPCODE_X1:
1365 return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
1366 }
1367 return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);
1368
1369 case SHIFT_OPCODE_X1:
1370 ext = get_ShiftOpcodeExtension_X1(bundle);
1371 imm = get_ShAmt_X1(bundle);
1372 return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);
1373
1374 case IMM8_OPCODE_X1:
1375 ext = get_Imm8OpcodeExtension_X1(bundle);
1376 imm = (int8_t)get_Dest_Imm8_X1(bundle);
1377 srcb = get_SrcB_X1(bundle);
1378 switch (ext) {
1379 case ST1_ADD_IMM8_OPCODE_X1:
1380 return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
1381 case ST2_ADD_IMM8_OPCODE_X1:
1382 return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
1383 case ST4_ADD_IMM8_OPCODE_X1:
1384 return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
1385 case STNT1_ADD_IMM8_OPCODE_X1:
1386 return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
1387 case STNT2_ADD_IMM8_OPCODE_X1:
1388 return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
1389 case STNT4_ADD_IMM8_OPCODE_X1:
1390 return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
1391 case STNT_ADD_IMM8_OPCODE_X1:
1392 return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
1393 case ST_ADD_IMM8_OPCODE_X1:
1394 return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
1395 }
1396 imm = (int8_t)get_Imm8_X1(bundle);
1397 return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);
1398
1399 case BRANCH_OPCODE_X1:
1400 ext = get_BrType_X1(bundle);
1401 imm = sextract32(get_BrOff_X1(bundle), 0, 17);
1402 return gen_branch_opcode_x1(dc, ext, srca, imm);
1403
1404 case JUMP_OPCODE_X1:
1405 ext = get_JumpOpcodeExtension_X1(bundle);
1406 imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
1407 return gen_jump_opcode_x1(dc, ext, imm);
1408
1409 case ADDLI_OPCODE_X1:
1410 case SHL16INSLI_OPCODE_X1:
1411 case ADDXLI_OPCODE_X1:
1412 imm = (int16_t)get_Imm16_X1(bundle);
1413 return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);
1414
1415 default:
1416 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1417 }
1418 }
1419
1420 static void notice_excp(DisasContext *dc, uint64_t bundle,
1421 const char *type, TileExcp excp)
1422 {
1423 if (likely(excp == TILEGX_EXCP_NONE)) {
1424 return;
1425 }
1426 gen_exception(dc, excp);
1427 if (excp == TILEGX_EXCP_OPCODE_UNIMPLEMENTED) {
1428 qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
1429 }
1430 }
1431
1432 static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
1433 {
1434 int i;
1435
1436 for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
1437 DisasContextTemp *wb = &dc->wb[i];
1438 wb->reg = TILEGX_R_NOREG;
1439 TCGV_UNUSED_I64(wb->val);
1440 }
1441 dc->num_wb = 0;
1442
1443 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1444 tcg_gen_debug_insn_start(dc->pc);
1445 }
1446
1447 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
1448 if (get_Mode(bundle)) {
1449 notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
1450 qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
1451 notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
1452 qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
1453 notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
1454 } else {
1455 notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
1456 qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
1457 notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
1458 }
1459 qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");
1460
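    /* Commit the registers modified by this bundle and free the temps.  */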
1461 for (i = dc->num_wb - 1; i >= 0; --i) {
1462 DisasContextTemp *wb = &dc->wb[i];
1463 if (wb->reg < TILEGX_R_COUNT) {
1464 tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
1465 }
1466 tcg_temp_free_i64(wb->val);
1467 }
1468
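    /* A jump recorded by this bundle ends the TB.  Unconditional jumps copy
       the target straight into the pc; conditional jumps select between the
       target and the fall-through address by comparing val1 against zero.  */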
1469 if (dc->jmp.cond != TCG_COND_NEVER) {
1470 if (dc->jmp.cond == TCG_COND_ALWAYS) {
1471 tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
1472 } else {
1473 TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
1474 tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
1475 dc->jmp.val1, load_zero(dc),
1476 dc->jmp.dest, next);
1477 tcg_temp_free_i64(dc->jmp.val1);
1478 tcg_temp_free_i64(next);
1479 }
1480 tcg_temp_free_i64(dc->jmp.dest);
1481 tcg_gen_exit_tb(0);
1482 dc->exit_tb = true;
1483 }
1484 }
1485
1486 static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
1487 TranslationBlock *tb,
1488 bool search_pc)
1489 {
1490 DisasContext ctx;
1491 DisasContext *dc = &ctx;
1492 CPUState *cs = CPU(cpu);
1493 CPUTLGState *env = &cpu->env;
1494 uint64_t pc_start = tb->pc;
1495 uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1496 int j, lj = -1;
1497 int num_insns = 0;
1498 int max_insns = tb->cflags & CF_COUNT_MASK;
1499
1500 dc->pc = pc_start;
1501 dc->mmuidx = 0;
1502 dc->exit_tb = false;
1503 dc->jmp.cond = TCG_COND_NEVER;
1504 TCGV_UNUSED_I64(dc->jmp.dest);
1505 TCGV_UNUSED_I64(dc->jmp.val1);
1506 TCGV_UNUSED_I64(dc->zero);
1507
1508 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1509 qemu_log("IN: %s\n", lookup_symbol(pc_start));
1510 }
1511 if (!max_insns) {
1512 max_insns = CF_COUNT_MASK;
1513 }
1514 if (cs->singlestep_enabled || singlestep) {
1515 max_insns = 1;
1516 }
1517 gen_tb_start(tb);
1518
1519 while (1) {
1520 if (search_pc) {
1521 j = tcg_op_buf_count();
1522 if (lj < j) {
1523 lj++;
1524 while (lj < j) {
1525 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1526 }
1527 }
1528 tcg_ctx.gen_opc_pc[lj] = dc->pc;
1529 tcg_ctx.gen_opc_instr_start[lj] = 1;
1530 tcg_ctx.gen_opc_icount[lj] = num_insns;
1531 }
1532 translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));
1533
1534 if (dc->exit_tb) {
1535 /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */
1536 break;
1537 }
1538 dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
1539 if (++num_insns >= max_insns
1540 || dc->pc >= next_page_start
1541 || tcg_op_buf_full()) {
1542 /* Ending the TB due to TB size or page boundary. Set PC. */
1543 tcg_gen_movi_tl(cpu_pc, dc->pc);
1544 tcg_gen_exit_tb(0);
1545 break;
1546 }
1547 }
1548
1549 gen_tb_end(tb, num_insns);
1550 if (search_pc) {
1551 j = tcg_op_buf_count();
1552 lj++;
1553 while (lj <= j) {
1554 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1555 }
1556 } else {
1557 tb->size = dc->pc - pc_start;
1558 tb->icount = num_insns;
1559 }
1560
1561 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
1562 }
1563
1564 void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
1565 {
1566 gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
1567 }
1568
1569 void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
1570 {
1571 gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
1572 }
1573
1574 void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
1575 {
1576 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
1577 }
1578
1579 void tilegx_tcg_init(void)
1580 {
1581 int i;
1582
1583 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1584 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc), "pc");
1585 for (i = 0; i < TILEGX_R_COUNT; i++) {
1586 cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
1587 offsetof(CPUTLGState, regs[i]),
1588 reg_names[i]);
1589 }
1590 }