/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"

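/*
 * Optional pieces of CPU state are migrated as VMState subsections.  A
 * subsection is only put on the wire when its .needed callback returns
 * true on the source side, so streams from CPUs without the
 * corresponding feature stay compact and compatible.
 */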
static bool pmp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_feature(env, RISCV_FEATURE_PMP);
}

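/*
 * Only the raw pmpaddr/pmpcfg register values are migrated; once they
 * have been restored, rebuild the rule state QEMU derives from them.
 */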
static int pmp_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        pmp_update_rule_addr(env, i);
    }
    pmp_update_rule_nums(env);

    return 0;
}

static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyper_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVH);
}

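/*
 * Hypervisor extension state: the H-mode CSRs, the VS-mode CSRs and the
 * *_hs background copies of the supervisor CSRs that are swapped in and
 * out as the hart enters and leaves virtualization mode.
 */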
static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = hyper_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),
        VMSTATE_UINT64(env.vstimecmp, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool vector_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVV);
}

static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pointermasking_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVJ);
}

static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool rv128_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return env->misa_mxl_max == MXL_RV128;
}

static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool kvmtimer_needed(void *opaque)
{
    return kvm_enabled();
}

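/*
 * The KVM timer registers were just loaded from the stream; flag them
 * dirty so the restored values can be written back to the in-kernel
 * timer rather than being overwritten by a later read from KVM.
 */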
static int cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->kvm_timer_dirty = true;
    return 0;
}

static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool debug_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_feature(env, RISCV_FEATURE_DEBUG);
}

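/*
 * Per-trigger state for type 2 (mcontrol, address/data match) triggers
 * from the RISC-V Debug specification.
 */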
static const VMStateDescription vmstate_debug_type2 = {
    .name = "cpu/debug/type2",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(mcontrol, type2_trigger_t),
        VMSTATE_UINTTL(maddress, type2_trigger_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_debug = {
    .name = "cpu/debug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = debug_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.trigger_cur, RISCVCPU),
        VMSTATE_STRUCT_ARRAY(env.type2_trig, RISCVCPU, TRIGGER_TYPE2_NUM,
                             0, vmstate_debug_type2, type2_trigger_t),
        VMSTATE_END_OF_LIST()
    }
};

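/*
 * Recompute state that is derived from the migrated CSRs rather than
 * migrated itself: the effective XLEN and the pointer-masking mask/base.
 */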
static int riscv_cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);
    return 0;
}

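/* The *envcfg CSRs only exist from privileged spec version 1.12 on. */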
static bool envcfg_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return env->priv_ver >= PRIV_VERSION_1_12_0;
}

static const VMStateDescription vmstate_envcfg = {
    .name = "cpu/envcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = envcfg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.menvcfg, RISCVCPU),
        VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
        VMSTATE_UINT64(env.henvcfg, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.pmu_num;
}

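/*
 * State of one programmable hpmcounter; the top-level description below
 * embeds an array of these via VMSTATE_STRUCT_ARRAY.
 */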
static const VMStateDescription vmstate_pmu_ctr_state = {
    .name = "cpu/pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
        VMSTATE_BOOL(started, PMUCTRState),
        VMSTATE_END_OF_LIST()
    }
};

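/*
 * Top-level per-CPU migration state: the GPRs, FPRs and the always
 * present machine/supervisor CSRs, with the optional subsections above
 * attached at the end.
 */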
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = riscv_cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINT32(env.features, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_UINTTL(env.virt, RISCVCPU),
        VMSTATE_UINTTL(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
        VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                             vmstate_pmu_ctr_state, PMUCTRState),
        VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.mfromhost, RISCVCPU),
        VMSTATE_UINT64(env.mtohost, RISCVCPU),
        VMSTATE_UINT64(env.stimecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
        &vmstate_kvmtimer,
        &vmstate_envcfg,
        &vmstate_debug,
        NULL
    }
};
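
/*
 * Registration sketch (an assumption, not shown in this file): the CPU
 * class init in target/riscv/cpu.c is expected to hand this description
 * to the migration code, e.g. along the lines of
 *
 *     dc->vmsd = &vmstate_riscv_cpu;
 *
 * See that file for the exact hook-up used in this tree.
 */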