/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "qapi/qapi-types-common.h"

#define TCG_GUEST_DEFAULT_MO 0

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 */
#define TARGET_INSN_START_EXTRA_WORDS 1

#define TYPE_RISCV_CPU "riscv-cpu"

#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906")
#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host")

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64
#endif

#define RV(x) ((target_ulong)1 << (x - 'A'))

/*
 * Consider updating register_cpu_props() when adding
 * new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
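/*
 * Illustrative note (a sketch, not used by the code): each MISA letter maps
 * to a single bit, e.g. RV('A') == 1 << 0, RV('C') == 1 << 2 and
 * RV('I') == 1 << 8, so a combined mask such as (RVF | RVD) can be tested
 * against misa_ext in one step, e.g. via riscv_has_ext() defined later in
 * this header.
 */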


/* Privileged specification version */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
};

#define VEXT_VERSION_1_00_0 0x00010000

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

typedef struct CPUArchState CPURISCVState;

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
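/*
 * Example (a sketch, not part of the interface): a field of a vtype value
 * can be extracted with the FIELD_EX64() helper from "hw/registerfields.h",
 * e.g. FIELD_EX64(vtype, VTYPE, VSEW), which is how vext_get_vlmax() later
 * in this header decodes the selected element width.
 */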

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    target_ulong virt;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    target_ulong satp;  /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval; /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong mcountinhibit;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused. */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
                              target_ulong *val, target_ulong new_val,
                              target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];
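    /*
     * Illustrative (hypothetical) use of the macros above: an interrupt
     * controller model could pack a request as
     *     AIA_MAKE_IREG(isel, PRV_S, true, 2, 64)
     * and the receiving callback would unpack it again with AIA_IREG_ISEL(),
     * AIA_IREG_PRIV(), AIA_IREG_VIRT(), AIA_IREG_VGEIN() and AIA_IREG_XLEN().
     * "isel" is a made-up variable name here; PRV_S comes from "cpu_bits.h".
     */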

    /* True if in debugger mode. */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
};

OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
};

/*
 * map is a 16-bit bitmap: the most significant set bit in map is the maximum
 * satp mode that is supported. It may be chosen by the user and must respect
 * what qemu implements (valid_1_10_32/64) and what the hw is capable of
 * (supported bitmap below).
 *
 * init is a 16-bit bitmap used to make sure the user selected a correct
 * configuration as per the specification.
 *
 * supported is a 16-bit bitmap used to reflect the hw capabilities.
 */
typedef struct {
    uint16_t map, init, supported;
} RISCVSATPMap;
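
/*
 * Example (a sketch): the bit positions in these bitmaps follow the satp MODE
 * encoding of the privileged spec, so an RV64 CPU whose hardware can go up to
 * Sv48 would have at least bits 8 (Sv39) and 9 (Sv48) set in "supported", and
 * satp_mode_max_from_map() (declared at the end of this header) returns the
 * highest such mode.
 */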

struct RISCVCPUConfig {
    bool ext_i;
    bool ext_e;
    bool ext_g;
    bool ext_m;
    bool ext_a;
    bool ext_f;
    bool ext_d;
    bool ext_c;
    bool ext_s;
    bool ext_u;
    bool ext_h;
    bool ext_j;
    bool ext_v;
    bool ext_zba;
    bool ext_zbb;
    bool ext_zbc;
    bool ext_zbkb;
    bool ext_zbkc;
    bool ext_zbkx;
    bool ext_zbs;
    bool ext_zca;
    bool ext_zcb;
    bool ext_zcd;
    bool ext_zcf;
    bool ext_zcmp;
    bool ext_zcmt;
    bool ext_zk;
    bool ext_zkn;
    bool ext_zknd;
    bool ext_zkne;
    bool ext_zknh;
    bool ext_zkr;
    bool ext_zks;
    bool ext_zksed;
    bool ext_zksh;
    bool ext_zkt;
    bool ext_ifencei;
    bool ext_icsr;
    bool ext_icbom;
    bool ext_icboz;
    bool ext_zicond;
    bool ext_zihintpause;
    bool ext_smstateen;
    bool ext_sstc;
    bool ext_svadu;
    bool ext_svinval;
    bool ext_svnapot;
    bool ext_svpbmt;
    bool ext_zdinx;
    bool ext_zawrs;
    bool ext_zfh;
    bool ext_zfhmin;
    bool ext_zfinx;
    bool ext_zhinx;
    bool ext_zhinxmin;
    bool ext_zve32f;
    bool ext_zve64f;
    bool ext_zve64d;
    bool ext_zmmul;
    bool ext_zvfh;
    bool ext_zvfhmin;
    bool ext_smaia;
    bool ext_ssaia;
    bool ext_sscofpmf;
    bool rvv_ta_all_1s;
    bool rvv_ma_all_1s;

    uint32_t mvendorid;
    uint64_t marchid;
    uint64_t mimpid;

    /* Vendor-specific custom extensions */
    bool ext_xtheadba;
    bool ext_xtheadbb;
    bool ext_xtheadbs;
    bool ext_xtheadcmo;
    bool ext_xtheadcondmov;
    bool ext_xtheadfmemidx;
    bool ext_xtheadfmv;
    bool ext_xtheadmac;
    bool ext_xtheadmemidx;
    bool ext_xtheadmempair;
    bool ext_xtheadsync;
    bool ext_XVentanaCondOps;

    uint8_t pmu_num;
    char *priv_spec;
    char *user_spec;
    char *bext_spec;
    char *vext_spec;
    uint16_t vlen;
    uint16_t elen;
    uint16_t cbom_blocksize;
    uint16_t cboz_blocksize;
    bool mmu;
    bool pmp;
    bool epmp;
    bool debug;
    bool misa_w;

    bool short_isa_string;

#ifndef CONFIG_USER_ONLY
    RISCVSATPMap satp_mode;
#endif
};

typedef struct RISCVCPUConfig RISCVCPUConfig;

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
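/*
 * Example (a sketch; "level" is a hypothetical variable): an interrupt source
 * model can raise or lower a bit in mip with
 *     riscv_cpu_update_mip(env, MIP_SSIP, BOOL_TO_MASK(level));
 * where BOOL_TO_MASK() turns the boolean line level into an all-ones or
 * all-zeroes value for the masked update; MIP_SSIP comes from "cpu_bits.h".
 */
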
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#define TB_FLAGS_PRIV_MMU_MASK 3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
#define TB_FLAGS_MSTATUS_VS MSTATUS_VS

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, LMUL, 3, 3)
FIELD(TB_FLAGS, SEW, 6, 3)
/* Skip MSTATUS_VS (0x600) bits */
FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
FIELD(TB_FLAGS, VILL, 12, 1)
/* Skip MSTATUS_FS (0x6000) bits */
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 15, 1)
FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2)
FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 20, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)
FIELD(TB_FLAGS, VTA, 24, 1)
FIELD(TB_FLAGS, VMA, 25, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 26, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
    RISCVMXL xl = env->misa_mxl;
#if !defined(CONFIG_USER_ONLY)
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S | PRV_H */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
#endif
    return xl;
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
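
/*
 * Worked example: the RISCVMXL encoding matches the misa.MXL field, so
 * MXL_RV32 == 1, MXL_RV64 == 2 and MXL_RV128 == 3. With env->xl == MXL_RV64,
 * riscv_cpu_xlen() returns 16 << 2 == 64, and riscv_cpu_mxl_bits() above
 * likewise yields 1UL << (4 + 2) == 64 for the machine-mode XLEN.
 */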

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env) ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
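
/*
 * Example (a sketch): internal code can access a CSR by number with these
 * helpers, e.g.
 *     target_ulong mstatus = riscv_csr_read(env, CSR_MSTATUS);
 *     riscv_csr_write(env, CSR_MSCRATCH, 0);
 * CSR_MSTATUS and CSR_MSCRATCH are the CSR numbers from "cpu_bits.h".
 */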

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/**
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */
enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
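
/*
 * Illustrative (hypothetical) use of the accessors above: a machine model or
 * accelerator could swap the handler for one CSR, e.g.
 *     riscv_csr_operations ops;
 *     riscv_get_csr_ops(CSR_TIME, &ops);
 *     ops.read = my_read_time;
 *     riscv_set_csr_ops(CSR_TIME, &ops);
 * where my_read_time is a made-up riscv_csr_read_fn and CSR_TIME comes from
 * "cpu_bits.h".
 */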

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

#endif /* RISCV_CPU_H */