/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/migration.h"
#include "sysemu/runstate.h"

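/*
 * Build a KVM_GET/SET_ONE_REG id from the architecture tag, the register
 * class ('type'), the index within that class, and a size field derived
 * from the vCPU's XLEN (U32 for RV32, U64 for RV64).
 */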
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                 uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}

#define RISCV_CORE_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                 KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                 KVM_REG_RISCV_CSR_REG(name))

#define RISCV_TIMER_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
                 KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)

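/*
 * Convenience wrappers around kvm_get/set_one_reg().  The CSR variants
 * return the error code to the caller; the TIMER variants abort() instead,
 * as the timer sync paths below have no error return.
 */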
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

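/*
 * Synchronise the general-purpose register file and pc with KVM.
 * x0 is hard-wired to zero, so only x1-x31 are transferred.
 */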
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

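/*
 * KVM exposes the guest's supervisor-mode CSRs.  Note that sstatus, sie
 * and sip are kept in the mstatus, mie and mip fields of CPURISCVState.
 */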
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
    return ret;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}

static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}

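/*
 * The KVM timer registers (time, compare, state, frequency) are only
 * synchronised on VM state changes.  kvm_timer_dirty tracks whether QEMU
 * holds values that have not yet been written back to KVM.
 */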
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}

static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);

    /*
     * Writing RISCV_TIMER_REG(state) while env->kvm_timer_state == 0 makes
     * KVM return an error, so skip the write in that case.  Ideally KVM
     * would accept it, but working around it in QEMU is harmless for now.
     * TODO: adapt here if KVM changes.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
    }

    /*
     * For now, migration does not work between hosts with different timer
     * frequencies, so check during migration that the source and
     * destination frequencies match.
     */
    if (migration_is_running(migrate_get_current()->state)) {
        KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Dst Hosts timer frequency != Src Hosts");
        }
    }

    env->kvm_timer_dirty = false;
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

int kvm_arch_get_registers(CPUState *cs)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

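/*
 * VM change state handler: flush the cached timer state into KVM when the
 * VM resumes, and pull it out of KVM when the VM stops.
 */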
static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

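/*
 * vCPU init: register the timer state-change handler, then read the ISA
 * bitmap KVM reports through its CONFIG 'isa' register and mirror it into
 * env->misa_ext so QEMU sees the same extensions as KVM.
 */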
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    target_ulong isa;
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t id;

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
                          KVM_REG_RISCV_CONFIG_REG(isa));
    ret = kvm_get_one_reg(cs, id, &isa);
    if (ret) {
        return ret;
    }
    env->misa_ext = isa;

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

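/*
 * Handle KVM_EXIT_RISCV_SBI exits: forward the legacy (v0.1) SBI console
 * putchar/getchar calls to the first serial backend; other extensions are
 * reported as unimplemented.
 */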
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.ret[0] = ch;
        } else {
            run->riscv_sbi.ret[0] = -1;
        }
        ret = 0;
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: unhandled SBI exit, extension id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}

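/*
 * Reset a vCPU for KVM: point pc at the kernel entry and preload a0/a1
 * with the hart id and FDT address, per the RISC-V Linux boot convention.
 */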
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr; /* a1 */
    env->satp = 0;
}

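/*
 * Inject or clear the supervisor external interrupt (IRQ_S_EXT) via the
 * KVM_INTERRUPT ioctl; no other interrupt lines are routed through here.
 */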
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}