target/riscv/machine.c
/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"

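/*
 * PMP state travels as the optional "cpu/pmp" subsection. It is only sent
 * when the PMP feature is enabled, and the cached rule tables are rebuilt
 * from the loaded CSR values on the destination.
 */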
static bool pmp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_feature(env, RISCV_FEATURE_PMP);
}

static int pmp_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        pmp_update_rule_addr(env, i);
    }
    pmp_update_rule_nums(env);

    return 0;
}

static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

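/*
 * Hypervisor (RVH) state: the HS-level CSRs, the virtual supervisor CSRs
 * and the backed-up S-mode registers, sent as the "cpu/hyper" subsection.
 */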
static bool hyper_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVH);
}

static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = hyper_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

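/*
 * Vector (RVV) state: the vector register file and the vector CSRs,
 * sent as the "cpu/vector" subsection.
 */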
static bool vector_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVV);
}

static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

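/*
 * Pointer-masking (RVJ) state: the per-privilege mask/base CSRs, sent as
 * the "cpu/pointer_masking" subsection.
 */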
static bool pointermasking_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVJ);
}

static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

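/*
 * RV128-only state: the upper halves of the GPRs and of the M/S scratch
 * registers, sent as the "cpu/rv128" subsection.
 */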
static bool rv128_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return env->misa_mxl_max == MXL_RV128;
}

static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

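/*
 * KVM virtual timer state, only used with the KVM accelerator. The
 * post_load hook marks the timer state dirty so the loaded values get
 * pushed back to KVM.
 */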
static bool kvmtimer_needed(void *opaque)
{
    return kvm_enabled();
}

static int cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->kvm_timer_dirty = true;
    return 0;
}

static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

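/*
 * Derived state is not migrated: recompute the effective XLEN and the
 * current address-masking values from the CSRs that were just loaded.
 */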
static int riscv_cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);
    return 0;
}

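/*
 * Top-level CPU state. The unconditional machine- and supervisor-mode
 * registers live here; optional features are carried in the subsections
 * listed at the end.
 */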
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 3,
    .minimum_version_id = 3,
    .post_load = riscv_cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINT32(env.features, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_UINTTL(env.virt, RISCVCPU),
        VMSTATE_UINTTL(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.mfromhost, RISCVCPU),
        VMSTATE_UINT64(env.mtohost, RISCVCPU),
        VMSTATE_UINT64(env.timecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
        &vmstate_kvmtimer,
        NULL
    }
};