/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/migration.h"
#include "sysemu/runstate.h"

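/*
 * Build a KVM "one reg" id for the given register class (type) and index.
 * The access size is taken from the vCPU's MXL: 32-bit registers on RV32,
 * 64-bit registers on RV64.
 */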
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                 uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}

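/*
 * Convenience wrappers: build one-reg ids for the core, CSR, timer and FP
 * register classes, plus get/set helpers that return on error (or abort()
 * for the timer registers).
 */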
#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                                  KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                                 KVM_REG_RISCV_CSR_REG(name))

#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
                                   KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)

#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

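/* Read the guest PC and x1..x31 from KVM (x0 is hardwired to zero). */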
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

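/* Write the guest PC and x1..x31 back to KVM. */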
static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

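/*
 * Read the supervisor CSRs from KVM. The guest's sstatus, sie and sip
 * values are stored in env->mstatus, env->mie and env->mip respectively.
 */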
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
    return ret;
}

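/* Write the supervisor CSRs back to KVM (see kvm_riscv_get_regs_csr). */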
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}

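/*
 * Read the FP registers: use the 64-bit D view when RVD is present,
 * otherwise the 32-bit F view when only RVF is present.
 */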
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

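/* Write the FP registers back to KVM, mirroring kvm_riscv_get_regs_fp(). */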
static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}

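/*
 * Snapshot the KVM timer registers (time, compare, state, frequency).
 * Skipped if a previous snapshot has not been written back yet.
 */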
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}

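/* Restore the timer registers saved by kvm_riscv_get_regs_timer(). */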
static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);

    /*
     * Setting the RISCV_TIMER_REG(state) register causes a KVM error when
     * env->kvm_timer_state == 0. This is better fixed in KVM, but working
     * around it in QEMU is harmless for now.
     * TODO: if KVM changes, adapt here.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
    }

    /*
     * For now, migration does not work between hosts with different timer
     * frequencies, so check during migration that they match.
     */
    if (migration_is_running(migrate_get_current()->state)) {
        KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Dst Host's timer frequency != Src Host's");
        }
    }

    env->kvm_timer_dirty = false;
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

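/* Sync the full vCPU state out of KVM: core GPRs/PC, supervisor CSRs, FP. */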
int kvm_arch_get_registers(CPUState *cs)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

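/* Push the full vCPU state back into KVM in the same order. */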
int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

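/*
 * MSI routing and vCPU teardown need no architecture-specific handling;
 * the KVM vCPU id is simply QEMU's cpu_index.
 */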
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

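/*
 * VM state change hook: write the timer state back into KVM when the guest
 * resumes, and snapshot it when the guest stops.
 */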
static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

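/*
 * Per-vCPU init: register the VM state change handler and read the ISA
 * (misa extensions) supported by the in-kernel vCPU.
 */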
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    target_ulong isa;
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t id;

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
                          KVM_REG_RISCV_CONFIG_REG(isa));
    ret = kvm_get_one_reg(cs, id, &isa);
    if (ret) {
        return ret;
    }
    env->misa_ext = isa;

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

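/*
 * Handle SBI calls forwarded by KVM. Only the legacy 0.1 console
 * putchar/getchar calls are serviced, using the first serial chardev;
 * anything else is reported as unimplemented.
 */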
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.args[0] = ch;
        } else {
            run->riscv_sbi.args[0] = -1;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: unhandled SBI exit, extension id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}

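/* Only KVM_EXIT_RISCV_SBI exits are handled; anything else fails. */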
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}

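/*
 * Reset the vCPU to the SBI boot entry state: PC at the kernel, a0 holding
 * the hart (vCPU) id, a1 holding the FDT address, and bare satp.
 */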
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */
    env->satp = 0;
}

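/*
 * Inject or clear the supervisor external interrupt, the only interrupt
 * that can be delivered here, via the KVM_INTERRUPT ioctl.
 */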
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}