]> git.proxmox.com Git - mirror_qemu.git/blob - target/riscv/insn_trans/trans_rvi.c.inc
target/riscv: Remove condition guarding register zero for auipc and lui
[mirror_qemu.git] / target / riscv / insn_trans / trans_rvi.c.inc
1 /*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Decoder catch-all: raise an illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}
26
/* Encodings that are only reachable (and illegal) on RV64/RV128. */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: write the decode-expanded upper immediate to rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}
38
/* AUIPC: rd = pc of this instruction + upper immediate. */
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    return true;
}
44
/* JAL: direct jump; gen_jal handles the link write and control transfer. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
50
/*
 * JALR: indirect jump to (rs1 + imm) with bit 0 cleared, linking the
 * address of the next instruction into rd.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    /* Target = rs1 + imm, with the low bit forced to zero. */
    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
    if (!has_ext(ctx, RVC)) {
        /* Without RVC, bit 1 set means a misaligned (non-4-byte) target. */
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    /*
     * Write the link register only after the alignment branch, so rd is
     * left untouched when the misaligned-address exception is raised.
     */
    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
79
/*
 * Emit code comparing the 128-bit values {al,ah} and {bl,bh}.
 * The result is left in rl, to be tested against zero with the returned
 * condition.  'bz' indicates the second operand is known to be zero
 * (rs2 == x0 at the call site), enabling cheaper sequences.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* OR of the per-word XORs is zero iff the operands are equal. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* Signed: test the sign of (a - b), corrected for overflow. */
        if (bz) {
            /* Against zero the sign is just the high word. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);

            tcg_temp_free(tmp);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        /* Unsigned: propagate the borrow of (a - b) into a third word. */
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);

            tcg_temp_free(tmp);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }

    tcg_temp_free(rh);
    return cond;
}
147
/*
 * 128-bit setcond: materialize the comparison result as 0/1 in
 * {rl,rh}, with the high word always zero.
 */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
157
/*
 * Common helper for the conditional branches.  The not-taken path falls
 * through to the next instruction; the taken path checks target alignment
 * (when RVC is absent) before jumping.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        /* rs2 == 0 lets gen_compare_i128 use the compare-against-zero form. */
        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);

        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    /* Branch offsets are constant, so misalignment is known at translate time. */
    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
191
/* Conditional branches: thin wrappers selecting the TCG condition. */

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
221
/* Load for XLEN <= 64: one guest memory access into rd. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
231
232 /* Compute only 64-bit addresses to use the address translation mechanism */
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* <= 64-bit load: extend into the high half of the 128-bit reg. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}
261
262 static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
263 {
264 if (get_xl(ctx) == MXL_RV128) {
265 return gen_load_i128(ctx, a, memop);
266 } else {
267 return gen_load_tl(ctx, a, memop);
268 }
269 }
270
/* Load instructions: wrappers selecting the MemOp (size + signedness). */

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
319
/* Store for XLEN <= 64: one guest memory access from rs2. */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
328
/* RV128 store: 64-bit address arithmetic, split access for 128-bit data. */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}
350
351 static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
352 {
353 if (get_xl(ctx) == MXL_RV128) {
354 return gen_store_i128(ctx, a, memop);
355 } else {
356 return gen_store_tl(ctx, a, memop);
357 }
358 }
359
/* Store instructions: wrappers selecting the MemOp size. */

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
386
/*
 * RV128 double-word arithmetic: narrow the operation length to 64 bits
 * (ctx->ol) and reuse the generic arithmetic helpers.
 */

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
407
408 static void gen_addi2_i128(TCGv retl, TCGv reth,
409 TCGv srcl, TCGv srch, target_long imm)
410 {
411 TCGv imml = tcg_constant_tl(imm);
412 TCGv immh = tcg_constant_tl(-(imm < 0));
413 tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
414 }
415
/* ADDI: immediate add, with a dedicated 128-bit path. */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
420
/* Set-less-than primitives, signed and unsigned, XLEN and 128-bit forms. */

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}
442
/* Immediate compare and logical instructions. */

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    /* Note: the immediate is sign-extended, then compared unsigned. */
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
467
468 static void gen_slli_i128(TCGv retl, TCGv reth,
469 TCGv src1l, TCGv src1h,
470 target_long shamt)
471 {
472 if (shamt >= 64) {
473 tcg_gen_shli_tl(reth, src1l, shamt - 64);
474 tcg_gen_movi_tl(retl, 0);
475 } else {
476 tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
477 tcg_gen_shli_tl(retl, src1l, shamt);
478 }
479 }
480
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

/* SRLIW helper: logical right shift within the low 32 bits. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}
490
491 static void gen_srli_i128(TCGv retl, TCGv reth,
492 TCGv src1l, TCGv src1h,
493 target_long shamt)
494 {
495 if (shamt >= 64) {
496 tcg_gen_shri_tl(retl, src1h, shamt - 64);
497 tcg_gen_movi_tl(reth, 0);
498 } else {
499 tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
500 tcg_gen_shri_tl(reth, src1h, shamt);
501 }
502 }
503
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

/* SRAIW helper: arithmetic right shift within the low 32 bits. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}
514
515 static void gen_srai_i128(TCGv retl, TCGv reth,
516 TCGv src1l, TCGv src1h,
517 target_long shamt)
518 {
519 if (shamt >= 64) {
520 tcg_gen_sari_tl(retl, src1h, shamt - 64);
521 tcg_gen_sari_tl(reth, src1h, 63);
522 } else {
523 tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
524 tcg_gen_sari_tl(reth, src1h, shamt);
525 }
526 }
527
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

/* Register-register add/sub, with 128-bit carry/borrow variants. */

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
543
/*
 * 128-bit variable left shift.  'shamt' holds the 7-bit shift amount and
 * is clobbered (negated in place) to derive the complementary shift.
 * movcond handles the shamt % 64 == 0 case where a 64-bit shift by 64
 * would be undefined.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    /* hs: does the shift cross the 64-bit boundary?  ls/rs: shamt mod 64
     * and its complement (64 - shamt) mod 64. */
    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    /* Suppress the cross-word contribution when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* Select the >= 64 or < 64 result for each half. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}
578
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
593
/*
 * 128-bit variable logical right shift.  Mirrors gen_sll_i128: 'shamt'
 * is clobbered, and movcond guards the shamt % 64 == 0 case.
 */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    /* hs: boundary-crossing flag; rs/ls: shamt mod 64 and its complement. */
    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* Suppress the cross-word contribution when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* Select the >= 64 or < 64 result for each half. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}
628
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
633
/*
 * 128-bit variable arithmetic right shift.  Same structure as
 * gen_srl_i128, but the high word shifts arithmetically and, for
 * shifts >= 64, is replaced with the replicated sign bit.
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    /* hs: boundary-crossing flag; rs/ls: shamt mod 64 and its complement. */
    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* Suppress the cross-word contribution when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr is reused here to hold the sign-fill value. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}
669
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

/* Register-register logical operations. */

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
689
/*
 * W-suffix (32-bit on RV64/RV128) and D-suffix (64-bit on RV128)
 * variants: each narrows the operation length via ctx->ol, then reuses
 * the generic arithmetic/shift helpers.
 */

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
794
795
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}
802
/* FENCE.I: instruction-fetch fence (Zifencei extension). */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
818
/* Common tail for all CSR accesses: end the TB and return to the loop. */
static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
827
/*
 * CSR access helpers (XLEN <= 64).  gen_io_start() is emitted before the
 * helper call when icount is active, since CSR accesses may be
 * timing-sensitive (e.g. counters) -- NOTE(review): scope inferred from
 * the CF_USE_ICOUNT guard; confirm against the icount documentation.
 */

/* Read-only access: rd = csr. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* Write-only access: csr = src, no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

/* Read-modify-write: rd = csr; csr = (csr & ~mask) | (src & mask). */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
864
/*
 * 128-bit CSR access helpers.  The helper returns the low half directly;
 * the high half is passed back through env->retxh.
 */

static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
906
/* CSRRW: atomic swap of rs1 into the CSR, old value to rd. */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* On RV32, mask to 32 bits so only the low word is written. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}
940
/* CSRRS: read the CSR into rd, set the bits selected by rs1. */
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Set = write all-ones through a mask of the rs1 bits. */
        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}
969
/* CSRRC: read the CSR into rd, clear the bits selected by rs1. */
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Clear = write zero through a mask of the rs1 bits. */
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}
997
/* CSRRWI: like CSRRW but rs1 encodes a 5-bit zero-extended immediate. */
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* On RV32, mask to 32 bits so only the low word is written. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}
1030
/* CSRRSI: set CSR bits selected by the 5-bit immediate in rs1. */
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Set = write all-ones through a mask of the immediate bits. */
        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}
1058
/* CSRRCI: clear CSR bits selected by the 5-bit immediate in rs1. */
static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Clear = write zero through a mask of the immediate bits. */
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}