]> git.proxmox.com Git - mirror_qemu.git/blob - target/riscv/machine.c
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
[mirror_qemu.git] / target / riscv / machine.c
1 /*
2 * RISC-V VMState Description
3 *
4 * Copyright (c) 2020 Huawei Technologies Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "qemu/error-report.h"
22 #include "sysemu/kvm.h"
23 #include "migration/cpu.h"
24 #include "sysemu/cpu-timers.h"
25 #include "debug.h"
26
27 static bool pmp_needed(void *opaque)
28 {
29 RISCVCPU *cpu = opaque;
30 CPURISCVState *env = &cpu->env;
31
32 return riscv_feature(env, RISCV_FEATURE_PMP);
33 }
34
35 static int pmp_post_load(void *opaque, int version_id)
36 {
37 RISCVCPU *cpu = opaque;
38 CPURISCVState *env = &cpu->env;
39 int i;
40
41 for (i = 0; i < MAX_RISCV_PMPS; i++) {
42 pmp_update_rule_addr(env, i);
43 }
44 pmp_update_rule_nums(env);
45
46 return 0;
47 }
48
/*
 * Wire format for a single PMP entry: the address register and the
 * 8-bit config register.  Field order is part of the migration stream;
 * do not reorder without bumping version_id.
 */
static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};
59
/*
 * PMP subsection: migrates all MAX_RISCV_PMPS entries as an array of
 * vmstate_pmp_entry.  Sent only when pmp_needed() is true; post_load
 * rebuilds the cached rule state from the restored registers.
 */
static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};
72
73 static bool hyper_needed(void *opaque)
74 {
75 RISCVCPU *cpu = opaque;
76 CPURISCVState *env = &cpu->env;
77
78 return riscv_has_ext(env, RVH);
79 }
80
/*
 * Hypervisor (H extension) subsection.  Covers the HS-level CSRs, the
 * VS-level shadow CSRs, and the swapped-out S-mode state (*_hs fields)
 * used while V=1.  Field order and version are part of the wire format.
 */
static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = hyper_needed,
    .fields = (VMStateField[]) {
        /* HS-level CSRs */
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),
        VMSTATE_UINT64(env.vstimecmp, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        /* VS-level shadow CSRs */
        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        /* S-mode state saved while running with V=1 */
        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};
125
126 static bool vector_needed(void *opaque)
127 {
128 RISCVCPU *cpu = opaque;
129 CPURISCVState *env = &cpu->env;
130
131 return riscv_has_ext(env, RVV);
132 }
133
/*
 * Vector (V extension) subsection: the 32 vector registers (stored as
 * 64-bit chunks sized for RV_VLEN_MAX) plus the vector control CSRs.
 */
static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
150
151 static bool pointermasking_needed(void *opaque)
152 {
153 RISCVCPU *cpu = opaque;
154 CPURISCVState *env = &cpu->env;
155
156 return riscv_has_ext(env, RVJ);
157 }
158
/*
 * Pointer-masking (J extension) subsection: mmte plus the per-privilege
 * mask/base register pairs.
 */
static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};
176
177 static bool rv128_needed(void *opaque)
178 {
179 RISCVCPU *cpu = opaque;
180 CPURISCVState *env = &cpu->env;
181
182 return env->misa_mxl_max == MXL_RV128;
183 }
184
/*
 * RV128 subsection: the upper halves of the 128-bit GPRs and the
 * high parts of mscratch/sscratch.
 */
static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
197
198 static bool kvmtimer_needed(void *opaque)
199 {
200 return kvm_enabled();
201 }
202
203 static int cpu_post_load(void *opaque, int version_id)
204 {
205 RISCVCPU *cpu = opaque;
206 CPURISCVState *env = &cpu->env;
207
208 env->kvm_timer_dirty = true;
209 return 0;
210 }
211
/*
 * KVM virtual-timer subsection: time, compare and state values read
 * back from KVM.  post_load flags them dirty for re-injection.
 */
static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
225
226 static bool debug_needed(void *opaque)
227 {
228 RISCVCPU *cpu = opaque;
229 CPURISCVState *env = &cpu->env;
230
231 return riscv_feature(env, RISCV_FEATURE_DEBUG);
232 }
233
234 static int debug_post_load(void *opaque, int version_id)
235 {
236 RISCVCPU *cpu = opaque;
237 CPURISCVState *env = &cpu->env;
238
239 if (icount_enabled()) {
240 env->itrigger_enabled = riscv_itrigger_enabled(env);
241 }
242
243 return 0;
244 }
245
/*
 * Debug (trigger module) subsection: current trigger index and the
 * tdata1/2/3 CSR arrays for all RV_MAX_TRIGGERS triggers.
 */
static const VMStateDescription vmstate_debug = {
    .name = "cpu/debug",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = debug_needed,
    .post_load = debug_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.trigger_cur, RISCVCPU),
        VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata3, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_END_OF_LIST()
    }
};
260
261 static int riscv_cpu_post_load(void *opaque, int version_id)
262 {
263 RISCVCPU *cpu = opaque;
264 CPURISCVState *env = &cpu->env;
265
266 env->xl = cpu_recompute_xl(env);
267 riscv_cpu_update_mask(env);
268 return 0;
269 }
270
271 static bool smstateen_needed(void *opaque)
272 {
273 RISCVCPU *cpu = opaque;
274
275 return cpu->cfg.ext_smstateen;
276 }
277
/*
 * Smstateen subsection: the mstateen/hstateen/sstateen register arrays.
 *
 * NOTE(review): "cpu/smtateen" looks like a typo for "cpu/smstateen",
 * but the name is part of the migration stream format — changing it
 * would break migration against existing binaries, so it is preserved.
 */
static const VMStateDescription vmstate_smstateen = {
    .name = "cpu/smtateen",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smstateen_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
290
291 static bool envcfg_needed(void *opaque)
292 {
293 RISCVCPU *cpu = opaque;
294 CPURISCVState *env = &cpu->env;
295
296 return (env->priv_ver >= PRIV_VERSION_1_12_0 ? 1 : 0);
297 }
298
/*
 * envcfg subsection: menvcfg/senvcfg/henvcfg CSRs (priv spec 1.12+).
 * Note senvcfg is target_ulong sized while m/henvcfg are 64-bit.
 */
static const VMStateDescription vmstate_envcfg = {
    .name = "cpu/envcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = envcfg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.menvcfg, RISCVCPU),
        VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
        VMSTATE_UINT64(env.henvcfg, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
311
312 static bool pmu_needed(void *opaque)
313 {
314 RISCVCPU *cpu = opaque;
315
316 return cpu->cfg.pmu_num;
317 }
318
/*
 * Wire format for one PMU counter: current and previous values (low and
 * high halves) plus the started flag.
 */
static const VMStateDescription vmstate_pmu_ctr_state = {
    .name = "cpu/pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
        VMSTATE_BOOL(started, PMUCTRState),
        VMSTATE_END_OF_LIST()
    }
};
333
/*
 * Top-level RISC-V CPU migration description.  Carries the baseline
 * register and CSR state; optional/conditional state lives in the
 * .subsections below, each guarded by its own .needed callback.
 *
 * Field order and the version numbers are part of the wire format:
 * reordering fields or changing version_id breaks cross-version
 * migration.
 */
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .post_load = riscv_cpu_post_load,
    .fields = (VMStateField[]) {
        /* Integer and floating-point register files */
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        /* LR/SC reservation address and value */
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        /* ISA configuration (misa) and feature flags */
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINT32(env.features, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_UINTTL(env.virt, RISCVCPU),
        VMSTATE_UINT64(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        /* Machine- and supervisor-level CSRs */
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
        /* PMU counters and their event selectors */
        VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                             vmstate_pmu_ctr_state, PMUCTRState),
        VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.mfromhost, RISCVCPU),
        VMSTATE_UINT64(env.mtohost, RISCVCPU),
        VMSTATE_UINT64(env.stimecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    /* Conditional state; each entry's .needed decides inclusion. */
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
        &vmstate_kvmtimer,
        &vmstate_envcfg,
        &vmstate_debug,
        &vmstate_smstateen,
        NULL
    }
};