/*
 * RISC-V translation routines for the T-Head vendor extensions (xthead*).
 *
 * Copyright (c) 2022 VRULL GmbH.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define REQUIRE_XTHEADBA(ctx) do {               \
    if (!ctx->cfg_ptr->ext_xtheadba) {           \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADBB(ctx) do {               \
    if (!ctx->cfg_ptr->ext_xtheadbb) {           \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADBS(ctx) do {               \
    if (!ctx->cfg_ptr->ext_xtheadbs) {           \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADCMO(ctx) do {              \
    if (!ctx->cfg_ptr->ext_xtheadcmo) {          \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADCONDMOV(ctx) do {          \
    if (!ctx->cfg_ptr->ext_xtheadcondmov) {      \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADFMEMIDX(ctx) do {          \
    if (!ctx->cfg_ptr->ext_xtheadfmemidx) {      \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADFMV(ctx) do {              \
    if (!ctx->cfg_ptr->ext_xtheadfmv) {          \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADMAC(ctx) do {              \
    if (!ctx->cfg_ptr->ext_xtheadmac) {          \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADMEMIDX(ctx) do {           \
    if (!ctx->cfg_ptr->ext_xtheadmemidx) {       \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADMEMPAIR(ctx) do {          \
    if (!ctx->cfg_ptr->ext_xtheadmempair) {      \
        return false;                            \
    }                                            \
} while (0)

#define REQUIRE_XTHEADSYNC(ctx) do {             \
    if (!ctx->cfg_ptr->ext_xtheadsync) {         \
        return false;                            \
    }                                            \
} while (0)

/*
 * Calculate and return the address for indexed mem operations:
 * If !zext_offs, then the address is rs1 + (rs2 << imm2).
 * If zext_offs, then the address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static TCGv get_th_address_indexed(DisasContext *ctx, int rs1, int rs2,
                                   int imm2, bool zext_offs)
{
    TCGv src2 = get_gpr(ctx, rs2, EXT_NONE);
    TCGv offs = tcg_temp_new();

    if (zext_offs) {
        tcg_gen_extract_tl(offs, src2, 0, 32);
        tcg_gen_shli_tl(offs, offs, imm2);
    } else {
        tcg_gen_shli_tl(offs, src2, imm2);
    }

    return get_address_indexed(ctx, rs1, offs);
}

/* XTheadBa */

/*
 * th.addsl is similar to sh[123]add (from Zba), but not an
 * alternative encoding: while sh[123]add applies the shift to rs1,
 * th.addsl shifts rs2, i.e. rd = rs1 + (rs2 << shamt).
 */

#define GEN_TH_ADDSL(SHAMT)                                      \
static void gen_th_addsl##SHAMT(TCGv ret, TCGv arg1, TCGv arg2)  \
{                                                                \
    TCGv t = tcg_temp_new();                                     \
    tcg_gen_shli_tl(t, arg2, SHAMT);                             \
    tcg_gen_add_tl(ret, t, arg1);                                \
}

GEN_TH_ADDSL(1)
GEN_TH_ADDSL(2)
GEN_TH_ADDSL(3)

#define GEN_TRANS_TH_ADDSL(SHAMT)                                  \
static bool trans_th_addsl##SHAMT(DisasContext *ctx,               \
                                  arg_th_addsl##SHAMT * a)         \
{                                                                  \
    REQUIRE_XTHEADBA(ctx);                                         \
    return gen_arith(ctx, a, EXT_NONE, gen_th_addsl##SHAMT, NULL); \
}

GEN_TRANS_TH_ADDSL(1)
GEN_TRANS_TH_ADDSL(2)
GEN_TRANS_TH_ADDSL(3)

/* XTheadBb */

/* th.srri is an alternate encoding for rori (from Zbb) */
static bool trans_th_srri(DisasContext *ctx, arg_th_srri * a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_rotri_tl, gen_roriw, NULL);
}

/* th.srriw is an alternate encoding for roriw (from Zbb) */
static bool trans_th_srriw(DisasContext *ctx, arg_th_srriw *a)
{
    REQUIRE_XTHEADBB(ctx);
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw, NULL);
}

/* th.ext and th.extu perform signed/unsigned bitfield extraction */
static bool gen_th_bfextract(DisasContext *ctx, arg_th_bfext *a,
                             void (*f)(TCGv, TCGv, unsigned int, unsigned int))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv source = get_gpr(ctx, a->rs1, EXT_ZERO);

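    /* If msb < lsb, no field is extracted and rd is left unchanged. */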
    if (a->lsb <= a->msb) {
        f(dest, source, a->lsb, a->msb - a->lsb + 1);
        gen_set_gpr(ctx, a->rd, dest);
    }
    return true;
}

static bool trans_th_ext(DisasContext *ctx, arg_th_ext *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_th_bfextract(ctx, a, tcg_gen_sextract_tl);
}

static bool trans_th_extu(DisasContext *ctx, arg_th_extu *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_th_bfextract(ctx, a, tcg_gen_extract_tl);
}

/* th.ff0: find first zero (clz on an inverted input) */
static bool gen_th_ff0(DisasContext *ctx, arg_th_ff0 *a, DisasExtend ext)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    int olen = get_olen(ctx);
    TCGv t = tcg_temp_new();

    tcg_gen_not_tl(t, src1);
    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            gen_clzw(dest, t);
        } else {
            g_assert_not_reached();
        }
    } else {
        gen_clz(dest, t);
    }

    gen_set_gpr(ctx, a->rd, dest);

    return true;
}

static bool trans_th_ff0(DisasContext *ctx, arg_th_ff0 *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_th_ff0(ctx, a, EXT_NONE);
}

/* th.ff1 is an alternate encoding for clz (from Zbb) */
static bool trans_th_ff1(DisasContext *ctx, arg_th_ff1 *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_unary_per_ol(ctx, a, EXT_NONE, gen_clz, gen_clzw);
}

static void gen_th_revw(TCGv ret, TCGv arg1)
{
    tcg_gen_bswap32_tl(ret, arg1, TCG_BSWAP_OS);
}

/* th.rev is an alternate encoding for the RV64 rev8 (from Zbb) */
static bool trans_th_rev(DisasContext *ctx, arg_th_rev *a)
{
    REQUIRE_XTHEADBB(ctx);

    return gen_unary_per_ol(ctx, a, EXT_NONE, tcg_gen_bswap_tl, gen_th_revw);
}

/* th.revw is a sign-extended byte-swap of the lower word */
static bool trans_th_revw(DisasContext *ctx, arg_th_revw *a)
{
    REQUIRE_XTHEADBB(ctx);
    REQUIRE_64BIT(ctx);
    return gen_unary(ctx, a, EXT_NONE, gen_th_revw);
}

/* th.tstnbz is equivalent to an orc.b (from Zbb) with inverted result */
static void gen_th_tstnbz(TCGv ret, TCGv source1)
{
    gen_orc_b(ret, source1);
    tcg_gen_not_tl(ret, ret);
}

static bool trans_th_tstnbz(DisasContext *ctx, arg_th_tstnbz *a)
{
    REQUIRE_XTHEADBB(ctx);
    return gen_unary(ctx, a, EXT_ZERO, gen_th_tstnbz);
}

/* XTheadBs */

/* th.tst is an alternate encoding for bexti (from Zbs) */
static bool trans_th_tst(DisasContext *ctx, arg_th_tst *a)
{
    REQUIRE_XTHEADBS(ctx);
    return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bext);
}

/* XTheadCmo */

/* Test if priv level is M, S, or U (cannot fail). */
#define REQUIRE_PRIV_MSU(ctx)

/* Test if priv level is M or S. */
#define REQUIRE_PRIV_MS(ctx)                     \
do {                                             \
    if (ctx->priv == PRV_U) {                    \
        return false;                            \
    }                                            \
} while (0)

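/*
 * The cache-management instructions below have no effect in QEMU's memory
 * model, so they are implemented as no-ops that only perform the extension
 * and privilege checks.
 */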
#define NOP_PRIVCHECK(insn, extcheck, privcheck)                 \
static bool trans_ ## insn(DisasContext *ctx, arg_ ## insn * a)  \
{                                                                \
    (void) a;                                                    \
    extcheck(ctx);                                               \
    privcheck(ctx);                                              \
    return true;                                                 \
}

NOP_PRIVCHECK(th_dcache_call, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_ciall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cpa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cipa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_ipa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cva, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_dcache_civa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_dcache_iva, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_dcache_csw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cisw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_isw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cpal1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)

NOP_PRIVCHECK(th_icache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_ialls, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_ipa, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_iva, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)

NOP_PRIVCHECK(th_l2cache_call, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_l2cache_ciall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_l2cache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)

/* XTheadCondMov */

static bool gen_th_condmove(DisasContext *ctx, arg_r *a, TCGCond cond)
{
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv old = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv dest = dest_gpr(ctx, a->rd);

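    /* dest = (src2 <cond> 0) ? src1 : previous value of rd */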
    tcg_gen_movcond_tl(cond, dest, src2, ctx->zero, src1, old);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* th.mveqz: "if (rs2 == 0) rd = rs1;" */
static bool trans_th_mveqz(DisasContext *ctx, arg_th_mveqz *a)
{
    REQUIRE_XTHEADCONDMOV(ctx);
    return gen_th_condmove(ctx, a, TCG_COND_EQ);
}

/* th.mvnez: "if (rs2 != 0) rd = rs1;" */
static bool trans_th_mvnez(DisasContext *ctx, arg_th_mveqz *a)
{
    REQUIRE_XTHEADCONDMOV(ctx);
    return gen_th_condmove(ctx, a, TCG_COND_NE);
}

/* XTheadFmemIdx */

/*
 * Load 64-bit float from indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_fload_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                          bool zext_offs)
{
    TCGv_i64 rd = cpu_fpr[a->rd];
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_ld_i64(rd, addr, ctx->mem_idx, memop);
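    /* Single-precision loads are NaN-boxed into the 64-bit FP register. */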
    if ((memop & MO_SIZE) == MO_32) {
        gen_nanbox_s(rd, rd);
    }

    mark_fs_dirty(ctx);
    return true;
}

/*
 * Store 64-bit float to indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_fstore_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                           bool zext_offs)
{
    TCGv_i64 rd = cpu_fpr[a->rd];
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_st_i64(rd, addr, ctx->mem_idx, memop);

    return true;
}

static bool trans_th_flrd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fload_idx(ctx, a, MO_TEUQ, false);
}

static bool trans_th_flrw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fload_idx(ctx, a, MO_TEUL, false);
}

static bool trans_th_flurd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fload_idx(ctx, a, MO_TEUQ, true);
}

static bool trans_th_flurw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fload_idx(ctx, a, MO_TEUL, true);
}

static bool trans_th_fsrd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fstore_idx(ctx, a, MO_TEUQ, false);
}

static bool trans_th_fsrw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fstore_idx(ctx, a, MO_TEUL, false);
}

static bool trans_th_fsurd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    return gen_fstore_idx(ctx, a, MO_TEUQ, true);
}

static bool trans_th_fsurw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADFMEMIDX(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVF);
    return gen_fstore_idx(ctx, a, MO_TEUL, true);
}

/* XTheadFmv */

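/*
 * th.fmv.hw.x and th.fmv.x.hw move data between a GPR and the upper 32 bits
 * of a 64-bit FP register, which the standard RV32 fmv instructions cannot
 * reach directly.
 */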
static bool trans_th_fmv_hw_x(DisasContext *ctx, arg_th_fmv_hw_x *a)
{
    REQUIRE_XTHEADFMV(ctx);
    REQUIRE_32BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, src1);
    tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], t1, 32, 32);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_th_fmv_x_hw(DisasContext *ctx, arg_th_fmv_x_hw *a)
{
    REQUIRE_XTHEADFMV(ctx);
    REQUIRE_32BIT(ctx);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    TCGv dst;
    TCGv_i64 t1;

    dst = dest_gpr(ctx, a->rd);
    t1 = tcg_temp_new_i64();

    tcg_gen_extract_i64(t1, cpu_fpr[a->rs1], 32, 32);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_set_gpr(ctx, a->rd, dst);
    mark_fs_dirty(ctx);
    return true;
}

/* XTheadMac */

static bool gen_th_mac(DisasContext *ctx, arg_r *a,
                       void (*accumulate_func)(TCGv, TCGv, TCGv),
                       void (*extend_operand_func)(TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src0 = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv tmp = tcg_temp_new();

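    /*
     * For the halfword variants both operands are extended before the
     * multiplication; otherwise rs1 and rs2 are multiplied as-is.
     */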
    if (extend_operand_func) {
        TCGv tmp2 = tcg_temp_new();
        extend_operand_func(tmp, src1);
        extend_operand_func(tmp2, src2);
        tcg_gen_mul_tl(tmp, tmp, tmp2);
    } else {
        tcg_gen_mul_tl(tmp, src1, src2);
    }

    accumulate_func(dest, src0, tmp);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* th.mula: "rd = rd + rs1 * rs2" */
static bool trans_th_mula(DisasContext *ctx, arg_th_mula *a)
{
    REQUIRE_XTHEADMAC(ctx);
    return gen_th_mac(ctx, a, tcg_gen_add_tl, NULL);
}

/* th.mulah: "rd = sext.w(rd + sext.w(rs1[15:0]) * sext.w(rs2[15:0]))" */
static bool trans_th_mulah(DisasContext *ctx, arg_th_mulah *a)
{
    REQUIRE_XTHEADMAC(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_add_tl, tcg_gen_ext16s_tl);
}

/* th.mulaw: "rd = sext.w(rd + rs1 * rs2)" */
static bool trans_th_mulaw(DisasContext *ctx, arg_th_mulaw *a)
{
    REQUIRE_XTHEADMAC(ctx);
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_add_tl, NULL);
}

/* th.muls: "rd = rd - rs1 * rs2" */
static bool trans_th_muls(DisasContext *ctx, arg_th_muls *a)
{
    REQUIRE_XTHEADMAC(ctx);
    return gen_th_mac(ctx, a, tcg_gen_sub_tl, NULL);
}

/* th.mulsh: "rd = sext.w(rd - sext.w(rs1[15:0]) * sext.w(rs2[15:0]))" */
static bool trans_th_mulsh(DisasContext *ctx, arg_th_mulsh *a)
{
    REQUIRE_XTHEADMAC(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_sub_tl, tcg_gen_ext16s_tl);
}

/* th.mulsw: "rd = sext.w(rd - rs1 * rs2)" */
static bool trans_th_mulsw(DisasContext *ctx, arg_th_mulsw *a)
{
    REQUIRE_XTHEADMAC(ctx);
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_th_mac(ctx, a, tcg_gen_sub_tl, NULL);
}

/* XTheadMemIdx */

/*
 * Load with memop from indexed address and add (imm5 << imm2) to rs1.
 * If !preinc, then the load address is rs1.
 * If preinc, then the load address is rs1 + (imm5 << imm2).
 */
static bool gen_load_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop,
                         bool preinc)
{
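    /* Reject rd == rs1, since both registers are written back below. */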
    if (a->rs1 == a->rd) {
        return false;
    }

    int imm = a->imm5 << a->imm2;
    TCGv addr = get_address(ctx, a->rs1, preinc ? imm : 0);
    TCGv rd = dest_gpr(ctx, a->rd);
    TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);

    tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
    tcg_gen_addi_tl(rs1, rs1, imm);
    gen_set_gpr(ctx, a->rd, rd);
    gen_set_gpr(ctx, a->rs1, rs1);
    return true;
}

/*
 * Store with memop to indexed address and add (imm5 << imm2) to rs1.
 * If !preinc, then the store address is rs1.
 * If preinc, then the store address is rs1 + (imm5 << imm2).
 */
static bool gen_store_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop,
                          bool preinc)
{
    int imm = a->imm5 << a->imm2;
    TCGv addr = get_address(ctx, a->rs1, preinc ? imm : 0);
    TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    tcg_gen_addi_tl(rs1, rs1, imm);
    gen_set_gpr(ctx, a->rs1, rs1);
    return true;
}

static bool trans_th_ldia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TESQ, false);
}

static bool trans_th_ldib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TESQ, true);
}

static bool trans_th_lwia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESL, false);
}

static bool trans_th_lwib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESL, true);
}

static bool trans_th_lwuia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TEUL, false);
}

static bool trans_th_lwuib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_inc(ctx, a, MO_TEUL, true);
}

static bool trans_th_lhia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESW, false);
}

static bool trans_th_lhib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TESW, true);
}

static bool trans_th_lhuia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TEUW, false);
}

static bool trans_th_lhuib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_TEUW, true);
}

static bool trans_th_lbia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_SB, false);
}

static bool trans_th_lbib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_SB, true);
}

static bool trans_th_lbuia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_UB, false);
}

static bool trans_th_lbuib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_inc(ctx, a, MO_UB, true);
}

static bool trans_th_sdia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_inc(ctx, a, MO_TESQ, false);
}

static bool trans_th_sdib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_inc(ctx, a, MO_TESQ, true);
}

static bool trans_th_swia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESL, false);
}

static bool trans_th_swib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESL, true);
}

static bool trans_th_shia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESW, false);
}

static bool trans_th_shib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_TESW, true);
}

static bool trans_th_sbia(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_SB, false);
}

static bool trans_th_sbib(DisasContext *ctx, arg_th_meminc *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_inc(ctx, a, MO_SB, true);
}

/*
 * Load with memop from indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_load_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                         bool zext_offs)
{
    TCGv rd = dest_gpr(ctx, a->rd);
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, rd);

    return true;
}

/*
 * Store with memop to indexed address.
 * If !zext_offs, then address is rs1 + (rs2 << imm2).
 * If zext_offs, then address is rs1 + (zext(rs2[31:0]) << imm2).
 */
static bool gen_store_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
                          bool zext_offs)
{
    TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
    TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);

    return true;
}

static bool trans_th_lrd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TESQ, false);
}

static bool trans_th_lrw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESL, false);
}

static bool trans_th_lrwu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TEUL, false);
}

static bool trans_th_lrh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESW, false);
}

static bool trans_th_lrhu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TEUW, false);
}

static bool trans_th_lrb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_SB, false);
}

static bool trans_th_lrbu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_UB, false);
}

static bool trans_th_srd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_idx(ctx, a, MO_TESQ, false);
}

static bool trans_th_srw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESL, false);
}

static bool trans_th_srh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESW, false);
}

static bool trans_th_srb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_SB, false);
}

static bool trans_th_lurd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TESQ, true);
}

static bool trans_th_lurw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESL, true);
}

static bool trans_th_lurwu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_load_idx(ctx, a, MO_TEUL, true);
}

static bool trans_th_lurh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TESW, true);
}

static bool trans_th_lurhu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_TEUW, true);
}

static bool trans_th_lurb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_SB, true);
}

static bool trans_th_lurbu(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_load_idx(ctx, a, MO_UB, true);
}

static bool trans_th_surd(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    REQUIRE_64BIT(ctx);
    return gen_store_idx(ctx, a, MO_TESQ, true);
}

static bool trans_th_surw(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESL, true);
}

static bool trans_th_surh(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_TESW, true);
}

static bool trans_th_surb(DisasContext *ctx, arg_th_memidx *a)
{
    REQUIRE_XTHEADMEMIDX(ctx);
    return gen_store_idx(ctx, a, MO_SB, true);
}

/* XTheadMemPair */

static bool gen_loadpair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop,
                            int shamt)
{
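    /* rd1, rd2 and rs must be pairwise distinct; overlapping operands are
     * not accepted. */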
    if (a->rs == a->rd1 || a->rs == a->rd2 || a->rd1 == a->rd2) {
        return false;
    }

    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv addr1 = tcg_temp_new();
    TCGv addr2 = tcg_temp_new();
    int imm = a->sh2 << shamt;

    addr1 = get_address(ctx, a->rs, imm);
    addr2 = get_address(ctx, a->rs, memop_size(memop) + imm);

    tcg_gen_qemu_ld_tl(t1, addr1, ctx->mem_idx, memop);
    tcg_gen_qemu_ld_tl(t2, addr2, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd1, t1);
    gen_set_gpr(ctx, a->rd2, t2);
    return true;
}

static bool trans_th_ldd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    REQUIRE_64BIT(ctx);
    return gen_loadpair_tl(ctx, a, MO_TESQ, 4);
}

static bool trans_th_lwd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    return gen_loadpair_tl(ctx, a, MO_TESL, 3);
}

static bool trans_th_lwud(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    return gen_loadpair_tl(ctx, a, MO_TEUL, 3);
}

static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop,
                             int shamt)
{
    TCGv data1 = get_gpr(ctx, a->rd1, EXT_NONE);
    TCGv data2 = get_gpr(ctx, a->rd2, EXT_NONE);
    TCGv addr1 = tcg_temp_new();
    TCGv addr2 = tcg_temp_new();
    int imm = a->sh2 << shamt;

    addr1 = get_address(ctx, a->rs, imm);
    addr2 = get_address(ctx, a->rs, memop_size(memop) + imm);

    tcg_gen_qemu_st_tl(data1, addr1, ctx->mem_idx, memop);
    tcg_gen_qemu_st_tl(data2, addr2, ctx->mem_idx, memop);
    return true;
}

static bool trans_th_sdd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    REQUIRE_64BIT(ctx);
    return gen_storepair_tl(ctx, a, MO_TESQ, 4);
}

static bool trans_th_swd(DisasContext *ctx, arg_th_pair *a)
{
    REQUIRE_XTHEADMEMPAIR(ctx);
    return gen_storepair_tl(ctx, a, MO_TESL, 3);
}

/* XTheadSync */

static bool trans_th_sfence_vmas(DisasContext *ctx, arg_th_sfence_vmas *a)
{
    (void) a;
    REQUIRE_XTHEADSYNC(ctx);

#ifndef CONFIG_USER_ONLY
    REQUIRE_PRIV_MS(ctx);
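    /* The address/ASID operands are ignored; flush the entire TLB. */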
    gen_helper_tlb_flush_all(tcg_env);
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
static void gen_th_sync_local(DisasContext *ctx)
{
    /*
     * Emulate out-of-order barriers with pipeline flush
     * by exiting the translation block.
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
}
#endif

static bool trans_th_sync(DisasContext *ctx, arg_th_sync *a)
{
    (void) a;
    REQUIRE_XTHEADSYNC(ctx);

#ifndef CONFIG_USER_ONLY
    REQUIRE_PRIV_MSU(ctx);

    /*
     * th.sync is an out-of-order barrier.
     */
    gen_th_sync_local(ctx);

    return true;
#else
    return false;
#endif
}

static bool trans_th_sync_i(DisasContext *ctx, arg_th_sync_i *a)
{
    (void) a;
    REQUIRE_XTHEADSYNC(ctx);

#ifndef CONFIG_USER_ONLY
    REQUIRE_PRIV_MSU(ctx);

    /*
     * th.sync.i is th.sync plus pipeline flush.
     */
    gen_th_sync_local(ctx);

    return true;
#else
    return false;
#endif
}

static bool trans_th_sync_is(DisasContext *ctx, arg_th_sync_is *a)
{
    /* This instruction has the same behaviour as th.sync.i. */
    return trans_th_sync_i(ctx, a);
}

static bool trans_th_sync_s(DisasContext *ctx, arg_th_sync_s *a)
{
    /* This instruction has the same behaviour as th.sync. */
    return trans_th_sync(ctx, a);
}