]> git.proxmox.com Git - mirror_qemu.git/blob - target/riscv/insn_trans/trans_rvi.c.inc
321885f9517f79161455de3c0d600e89693697bf
[mirror_qemu.git] / target / riscv / insn_trans / trans_rvi.c.inc
1 /*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Raise an illegal-instruction exception for a decoded-but-invalid opcode. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}
26
/*
 * Encodings that are illegal only on RV64/RV128 (valid elsewhere):
 * require 64- or 128-bit XLEN, then raise illegal-instruction.
 */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: load the (pre-shifted by the decoder, presumably) immediate into rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}
38
/* AUIPC: rd = immediate + address of this instruction. */
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    return true;
}
44
/* JAL: pc-relative jump with link register rd. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
50
/*
 * JALR: indirect jump to (rs1 + imm) & ~1, writing the link address to rd.
 *
 * When neither the C extension nor Zca is available, a target with bit 1
 * set is misaligned and must raise an instruction-address-misaligned
 * exception instead of jumping.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;
    TCGv target_pc = tcg_temp_new();

    /* Target address: clear bit 0 as required for JALR. */
    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

    if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32s_tl(target_pc, target_pc);
    }

    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        TCGv t0 = tcg_temp_new();

        /* Without C/Zca, a 2-byte-aligned (but not 4-byte) target traps. */
        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, target_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    /* Link register must be written before the jump. */
    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    tcg_gen_mov_tl(cpu_pc, target_pc);
    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx, target_pc);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
83
/*
 * Fold a 128-bit comparison of (ah:al) against (bh:bl) into the single
 * word 'rl', returning the TCGCond the caller must then apply to
 * "rl vs 0" (see gen_setcond_i128 / gen_branch) to get the result.
 * 'bz' indicates that b is known to be zero, enabling shorter sequences.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* rl == 0 iff a == b: OR together the per-word differences. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            /* Against zero, the sign of a alone decides LT/GE. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            /*
             * Compute the sign of (a - b), corrected for signed
             * overflow: rl = rh ^ ((rh ^ ah) & (ah ^ bh)).
             */
            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            /* rl is 1 iff a >= b unsigned (no borrow out), else 0. */
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
145
/*
 * 128-bit setcond: (rh:rl) = (src1 cond src2) ? 1 : 0.
 * The comparison is folded into rl, then tested against zero.
 */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
155
/*
 * Common translation for the conditional branches (BEQ .. BGEU).
 * The not-taken path chains to the next insn; the taken path either
 * raises instruction-address-misaligned (target not 4-byte aligned and
 * neither C nor Zca present) or chains to pc + imm.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
    target_ulong next_pc;

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        /* Fold the 128-bit compare into tmp, then branch on tmp vs 0. */
        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->cur_insn_len);

    gen_set_label(l); /* branch taken */

    next_pc = ctx->base.pc_next + a->imm;
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        (next_pc & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, next_pc);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
        gen_goto_tb(ctx, 0, a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
192
/* BEQ/BNE/BLT/BGE/BLTU/BGEU: delegate to gen_branch with the condition. */
static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
222
/* Load (up to XLEN wide) into rd from rs1 + imm. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
232
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* <= 64-bit load: low word gets the data, high word the extension. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
260
261 static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
262 {
263 decode_save_opc(ctx);
264 if (get_xl(ctx) == MXL_RV128) {
265 return gen_load_i128(ctx, a, memop);
266 } else {
267 return gen_load_tl(ctx, a, memop);
268 }
269 }
270
/*
 * Load wrappers: pick the MemOp (size, signedness, endianness) for each
 * encoding.  LD/LWU require RV64 or RV128; LQ/LDU are RV128-only.
 */
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
319
/* Store (up to XLEN wide) rs2 to rs1 + imm. */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
328
/*
 * RV128 store: only the low 64 bits of the address are computed, matching
 * gen_load_i128.  A full 128-bit store is split into two 64-bit stores.
 */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
348
349 static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
350 {
351 decode_save_opc(ctx);
352 if (get_xl(ctx) == MXL_RV128) {
353 return gen_store_i128(ctx, a, memop);
354 } else {
355 return gen_store_tl(ctx, a, memop);
356 }
357 }
358
/* Store wrappers: pick the MemOp per encoding; SD needs RV64+, SQ RV128. */
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
385
/*
 * RV128 "double-word" arithmetic (ADDD/ADDID/SUBD): operate at an
 * effective operation length of 64 bits by narrowing ctx->ol.
 */
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
406
/*
 * 128-bit add-immediate: sign-extend the 64-bit immediate into a second
 * word (-(imm < 0) is all-ones for negative imm, zero otherwise) and do
 * a full 128-bit add with carry.
 */
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml = tcg_constant_tl(imm);
    TCGv immh = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}
414
/* ADDI: immediate add; 128-bit variant handled by gen_addi2_i128. */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
419
/* Set-less-than helpers (signed and unsigned), scalar and 128-bit. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}
441
/*
 * Immediate compare and logic ops.  SLTI/SLTIU read operands sign-extended
 * (EXT_SIGN); note SLTIU still sign-extends the immediate per the ISA,
 * the comparison itself is unsigned.
 */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
466
/*
 * 128-bit shift-left by a constant 0 < shamt < 128.
 * shamt >= 64: low word shifts entirely into the high word.
 * shamt < 64: high word takes bits from both words via extract2.
 */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}
479
/* SLLI: shift-left-immediate; 128-bit form via gen_slli_i128. */
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
484
/* 32-bit logical shift right: extract the surviving 32 - shamt bits. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}
489
/*
 * 128-bit logical shift right by a constant 0 < shamt < 128,
 * mirroring gen_slli_i128.
 */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}
502
/* SRLI: per-operation-length dispatch (64-bit, 32-bit word, 128-bit). */
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
508
/* 32-bit arithmetic shift right: sign-extracting the surviving bits. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}
513
/*
 * 128-bit arithmetic shift right by a constant 0 < shamt < 128.
 * For shamt >= 64 the high word collapses to the sign.
 */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}
526
/* SRAI: per-operation-length dispatch (64-bit, 32-bit word, 128-bit). */
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
532
/* ADD/SUB: the add2/sub2 TCG ops provide the 128-bit carry/borrow form. */
static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
542
/*
 * 128-bit shift-left by a variable amount (0..127 significant).
 * hs = shamt & 64 selects which destination half receives the shifted
 * low word; ls = shamt & 63, rs = (-shamt) & 63 are the sub-word shift
 * counts.  NOTE: clobbers 'shamt' (negated in place) — callers pass a
 * temporary.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    /* Suppress the cross-word contribution when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* shamt & 64: low word becomes 0 and its shifted value moves up. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}
569
/* SLL/SLT/SLTU register forms. */
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
584
/*
 * 128-bit logical shift right by a variable amount; mirror image of
 * gen_sll_i128.  NOTE: clobbers 'shamt' (negated in place).
 */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* Suppress the cross-word contribution when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* shamt & 64: high word's shifted value moves down, top becomes 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}
611
/* SRL: logical shift right, register form. */
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
616
/*
 * 128-bit arithmetic shift right by a variable amount.  Same structure
 * as gen_srl_i128 but the high word shifts arithmetically and, for
 * shamt >= 64, the new high word is the sign replication of src1h.
 * NOTE: clobbers 'shamt' (negated in place).
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* Suppress the cross-word contribution when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr reused: sign word for the shamt >= 64 case. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}
644
/* SRA and the register logic ops. */
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
664
/*
 * RV64/RV128 "W" immediate ops: run at a 32-bit operation length by
 * narrowing ctx->ol; the gen_* helpers handle the result extension.
 */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}
692
/* RV128 "D" immediate shifts: 64-bit operation length on a 128-bit hart. */
static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}
713
/* RV64/RV128 "W" register ops: 32-bit operation length. */
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
748
/* RV128 "D" register shifts: 64-bit operation length on a 128-bit hart. */
static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
769
/* Zihintpause PAUSE hint: decodes only when the extension is enabled. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
786
/* FENCE: conservatively emit a full barrier regardless of pred/succ bits. */
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}
793
/* Zifencei FENCE.I: end the TB so newly written code is retranslated. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
809
/* Common epilogue for every CSR access. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
820
/* CSR read without write side effects: rd = csr[rc]. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
831
/* CSR write without read side effects: csr[rc] = src. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}
840
/* CSR read-modify-write: rd = old csr; csr = (old & ~mask) | (src & mask). */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
851
/*
 * 128-bit CSR read: the helper returns the low word directly and leaves
 * the high word in env->retxh.
 */
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
864
/* 128-bit CSR write without read side effects. */
static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}
873
/*
 * 128-bit CSR read-modify-write; high result word is returned through
 * env->retxh, matching do_csrr_i128.
 */
static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
887
/* CSRRW: swap rs1 into the CSR; skip the read (and its effects) if rd==0. */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* All-ones mask == plain write; truncated to 32 bits on RV32. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}
921
922 static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
923 {
924 /*
925 * If rs1 == 0, the insn shall not write to the csr at all, nor
926 * cause any of the side effects that might occur on a csr write.
927 * Note that if rs1 specifies a register other than x0, holding
928 * a zero value, the instruction will still attempt to write the
929 * unmodified value back to the csr and will cause side effects.
930 */
931 if (get_xl(ctx) < MXL_RV128) {
932 if (a->rs1 == 0) {
933 return do_csrr(ctx, a->rd, a->csr);
934 }
935
936 TCGv ones = tcg_constant_tl(-1);
937 TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
938 return do_csrrw(ctx, a->rd, a->csr, ones, mask);
939 } else {
940 if (a->rs1 == 0) {
941 return do_csrr_i128(ctx, a->rd, a->csr);
942 }
943
944 TCGv ones = tcg_constant_tl(-1);
945 TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
946 TCGv maskh = get_gprh(ctx, a->rs1);
947 return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
948 }
949 }
950
/* CSRRC: clear the bits of rs1 in the CSR (csr &= ~rs1), old value to rd. */
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write zero under the rs1 mask: equivalent to an AND-NOT. */
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}
978
/* CSRRWI: like CSRRW but the source is the 5-bit uimm encoded in rs1. */
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}
1011
/* CSRRSI: like CSRRS but the set mask is the 5-bit uimm encoded in rs1. */
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}
1039
1040 static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
1041 {
1042 /*
1043 * If rs1 == 0, the insn shall not write to the csr at all, nor
1044 * cause any of the side effects that might occur on a csr write.
1045 * Note that if rs1 specifies a register other than x0, holding
1046 * a zero value, the instruction will still attempt to write the
1047 * unmodified value back to the csr and will cause side effects.
1048 */
1049 if (get_xl(ctx) < MXL_RV128) {
1050 if (a->rs1 == 0) {
1051 return do_csrr(ctx, a->rd, a->csr);
1052 }
1053
1054 TCGv mask = tcg_constant_tl(a->rs1);
1055 return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
1056 } else {
1057 if (a->rs1 == 0) {
1058 return do_csrr_i128(ctx, a->rd, a->csr);
1059 }
1060
1061 TCGv mask = tcg_constant_tl(a->rs1);
1062 return do_csrrw_i128(ctx, a->rd, a->csr,
1063 ctx->zero, ctx->zero, mask, ctx->zero);
1064 }
1065 }