target/mips/kvm.c (mirror_qemu.git, blob at commit "mips: introduce internal.h and cleanup cpu.h")
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include "qemu/osdep.h"
13 #include <sys/ioctl.h>
14
15 #include <linux/kvm.h>
16
17 #include "qemu-common.h"
18 #include "cpu.h"
19 #include "internal.h"
20 #include "qemu/error-report.h"
21 #include "qemu/timer.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/kvm.h"
24 #include "sysemu/cpus.h"
25 #include "kvm_mips.h"
26 #include "exec/memattrs.h"
27
28 #define DEBUG_KVM 0
29
30 #define DPRINTF(fmt, ...) \
31 do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
32
33 static int kvm_mips_fpu_cap;
34 static int kvm_mips_msa_cap;
35
36 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
37 KVM_CAP_LAST_INFO
38 };
39
40 static void kvm_mips_update_state(void *opaque, int running, RunState state);
41
42 unsigned long kvm_arch_vcpu_id(CPUState *cs)
43 {
44 return cs->cpu_index;
45 }
46
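/*
 * kvm_set_sigmask_len() takes a length in bytes, so MIPS's 128 signals
 * translate into the 16 passed below.  The FPU and MSA capabilities are
 * probed once here; they are enabled per vcpu in kvm_arch_init_vcpu().
 */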
47 int kvm_arch_init(MachineState *ms, KVMState *s)
48 {
49 /* MIPS has 128 signals */
50 kvm_set_sigmask_len(s, 16);
51
52 kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
53 kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
54
55 DPRINTF("%s\n", __func__);
56 return 0;
57 }
58
59 int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
60 {
61 return 0;
62 }
63
64 int kvm_arch_init_vcpu(CPUState *cs)
65 {
66 MIPSCPU *cpu = MIPS_CPU(cs);
67 CPUMIPSState *env = &cpu->env;
68 int ret = 0;
69
70 qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
71
72 if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
73 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
74 if (ret < 0) {
75 /* mark unsupported so it gets disabled on reset */
76 kvm_mips_fpu_cap = 0;
77 ret = 0;
78 }
79 }
80
81 if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
82 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
83 if (ret < 0) {
84 /* mark unsupported so it gets disabled on reset */
85 kvm_mips_msa_cap = 0;
86 ret = 0;
87 }
88 }
89
90 DPRINTF("%s\n", __func__);
91 return ret;
92 }
93
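/*
 * If enabling KVM_CAP_MIPS_FPU or KVM_CAP_MIPS_MSA failed above, the
 * corresponding kvm_mips_*_cap flag was cleared, so the reset handler below
 * masks the feature bit out of CP0_Config1/CP0_Config3 and the guest never
 * sees a capability that KVM cannot back.
 */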
94 void kvm_mips_reset_vcpu(MIPSCPU *cpu)
95 {
96 CPUMIPSState *env = &cpu->env;
97
98 if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
99 warn_report("KVM does not support FPU, disabling");
100 env->CP0_Config1 &= ~(1 << CP0C1_FP);
101 }
102 if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
103 warn_report("KVM does not support MSA, disabling");
104 env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
105 }
106
107 DPRINTF("%s\n", __func__);
108 }
109
110 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
111 {
112 DPRINTF("%s\n", __func__);
113 return 0;
114 }
115
116 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
117 {
118 DPRINTF("%s\n", __func__);
119 return 0;
120 }
121
122 static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
123 {
124 CPUMIPSState *env = &cpu->env;
125
126 return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
127 }
128
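/*
 * cpu_mips_io_interrupts_pending() tests Cause.IP2, the first hardware
 * interrupt line, which is where I/O interrupts are routed here; when it is
 * pending, kvm_arch_pre_run() below injects irq 2 via KVM_INTERRUPT.
 */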
129
130 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
131 {
132 MIPSCPU *cpu = MIPS_CPU(cs);
133 int r;
134 struct kvm_mips_interrupt intr;
135
136 qemu_mutex_lock_iothread();
137
138 if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
139 cpu_mips_io_interrupts_pending(cpu)) {
140 intr.cpu = -1;
141 intr.irq = 2;
142 r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
143 if (r < 0) {
144 error_report("%s: cpu %d: failed to inject IRQ %x",
145 __func__, cs->cpu_index, intr.irq);
146 }
147 }
148
149 qemu_mutex_unlock_iothread();
150 }
151
152 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
153 {
154 return MEMTXATTRS_UNSPECIFIED;
155 }
156
157 int kvm_arch_process_async_events(CPUState *cs)
158 {
159 return cs->halted;
160 }
161
162 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
163 {
164 int ret;
165
166 DPRINTF("%s\n", __func__);
167 switch (run->exit_reason) {
168 default:
169 error_report("%s: unknown exit reason %d",
170 __func__, run->exit_reason);
171 ret = -1;
172 break;
173 }
174
175 return ret;
176 }
177
178 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
179 {
180 DPRINTF("%s\n", __func__);
181 return true;
182 }
183
184 void kvm_arch_init_irq_routing(KVMState *s)
185 {
186 }
187
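/*
 * Interrupt injection helpers.  With the MIPS KVM_INTERRUPT ioctl a positive
 * intr.irq asserts the line and a negative value deasserts it.  As used
 * here, intr.cpu == -1 targets the vcpu whose file descriptor receives the
 * ioctl, while kvm_mips_set_ipi_interrupt() names the destination vcpu by
 * its cpu_index.
 */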
188 int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
189 {
190 CPUState *cs = CPU(cpu);
191 struct kvm_mips_interrupt intr;
192
193 if (!kvm_enabled()) {
194 return 0;
195 }
196
197 intr.cpu = -1;
198
199 if (level) {
200 intr.irq = irq;
201 } else {
202 intr.irq = -irq;
203 }
204
205 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
206
207 return 0;
208 }
209
210 int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
211 {
212 CPUState *cs = current_cpu;
213 CPUState *dest_cs = CPU(cpu);
214 struct kvm_mips_interrupt intr;
215
216 if (!kvm_enabled()) {
217 return 0;
218 }
219
220 intr.cpu = dest_cs->cpu_index;
221
222 if (level) {
223 intr.irq = irq;
224 } else {
225 intr.irq = -irq;
226 }
227
228 DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
229
230 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
231
232 return 0;
233 }
234
235 #define MIPS_CP0_32(_R, _S) \
236 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
237
238 #define MIPS_CP0_64(_R, _S) \
239 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
240
241 #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
242 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
243 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
244 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
245 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
246 #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
247 #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
248 #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
249 #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
250 #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
251 #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
252 #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
253 #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
254 #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
255 #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
256 #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
257 #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
258 #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
259 #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
260 #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
261 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
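/*
 * The low bits of these ids encode 8 * reg + sel, mirroring the
 * (register, select) pairs of the CP0 space.  For example:
 *
 *   KVM_REG_MIPS_CP0_STATUS = MIPS_CP0_32(12, 0)
 *                           = KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | 96
 */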
262
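/*
 * One-reg accessors: each wraps KVM_SET_ONE_REG or KVM_GET_ONE_REG with a
 * struct kvm_one_reg carrying the register id and a pointer to the value.
 * The *_ulreg variants copy through a 64-bit temporary so that a 32-bit
 * target_ulong is widened on write (and truncated on read), since the ids
 * used with them are declared KVM_REG_SIZE_U64 above.
 */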
263 static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
264 int32_t *addr)
265 {
266 struct kvm_one_reg cp0reg = {
267 .id = reg_id,
268 .addr = (uintptr_t)addr
269 };
270
271 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
272 }
273
274 static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
275 uint32_t *addr)
276 {
277 struct kvm_one_reg cp0reg = {
278 .id = reg_id,
279 .addr = (uintptr_t)addr
280 };
281
282 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
283 }
284
285 static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
286 target_ulong *addr)
287 {
288 uint64_t val64 = *addr;
289 struct kvm_one_reg cp0reg = {
290 .id = reg_id,
291 .addr = (uintptr_t)&val64
292 };
293
294 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
295 }
296
297 static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
298 int64_t *addr)
299 {
300 struct kvm_one_reg cp0reg = {
301 .id = reg_id,
302 .addr = (uintptr_t)addr
303 };
304
305 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
306 }
307
308 static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
309 uint64_t *addr)
310 {
311 struct kvm_one_reg cp0reg = {
312 .id = reg_id,
313 .addr = (uintptr_t)addr
314 };
315
316 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
317 }
318
319 static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
320 int32_t *addr)
321 {
322 struct kvm_one_reg cp0reg = {
323 .id = reg_id,
324 .addr = (uintptr_t)addr
325 };
326
327 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
328 }
329
330 static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
331 uint32_t *addr)
332 {
333 struct kvm_one_reg cp0reg = {
334 .id = reg_id,
335 .addr = (uintptr_t)addr
336 };
337
338 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
339 }
340
341 static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
342 target_ulong *addr)
343 {
344 int ret;
345 uint64_t val64 = 0;
346 struct kvm_one_reg cp0reg = {
347 .id = reg_id,
348 .addr = (uintptr_t)&val64
349 };
350
351 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
352 if (ret >= 0) {
353 *addr = val64;
354 }
355 return ret;
356 }
357
358 static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
359 int64_t *addr)
360 {
361 struct kvm_one_reg cp0reg = {
362 .id = reg_id,
363 .addr = (uintptr_t)addr
364 };
365
366 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
367 }
368
369 static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
370 uint64_t *addr)
371 {
372 struct kvm_one_reg cp0reg = {
373 .id = reg_id,
374 .addr = (uintptr_t)addr
375 };
376
377 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
378 }
379
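/*
 * Writable bits of the Config registers as pushed to KVM.
 * kvm_mips_change_one_reg() reads the current value, flips only the bits
 * that differ within the mask, and writes the result back, so bits outside
 * the mask keep whatever value KVM reports.
 */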
380 #define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M)
381 #define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \
382 (1U << CP0C1_FP))
383 #define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M)
384 #define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \
385 (1U << CP0C3_MSAP))
386 #define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M)
387 #define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \
388 (1U << CP0C5_UFE) | \
389 (1U << CP0C5_FRE) | \
390 (1U << CP0C5_UFR))
391
392 static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
393 int32_t *addr, int32_t mask)
394 {
395 int err;
396 int32_t tmp, change;
397
398 err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
399 if (err < 0) {
400 return err;
401 }
402
403 /* only change bits in mask */
404 change = (*addr ^ tmp) & mask;
405 if (!change) {
406 return 0;
407 }
408
409 tmp = tmp ^ change;
410 return kvm_mips_put_one_reg(cs, reg_id, &tmp);
411 }
412
413 /*
414 * We freeze the KVM timer when either the VM clock is stopped or the state is
415 * saved (the state is dirty).
416 */
417
418 /*
419 * Save the state of the KVM timer when VM clock is stopped or state is synced
420 * to QEMU.
421 */
422 static int kvm_mips_save_count(CPUState *cs)
423 {
424 MIPSCPU *cpu = MIPS_CPU(cs);
425 CPUMIPSState *env = &cpu->env;
426 uint64_t count_ctl;
427 int err, ret = 0;
428
429 /* freeze KVM timer */
430 err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
431 if (err < 0) {
432 DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
433 ret = err;
434 } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
435 count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
436 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
437 if (err < 0) {
438 DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
439 ret = err;
440 }
441 }
442
443 /* read CP0_Cause */
444 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
445 if (err < 0) {
446 DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
447 ret = err;
448 }
449
450 /* read CP0_Count */
451 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
452 if (err < 0) {
453 DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
454 ret = err;
455 }
456
457 return ret;
458 }
459
460 /*
461 * Restore the state of the KVM timer when VM clock is restarted or state is
462 * synced to KVM.
463 */
464 static int kvm_mips_restore_count(CPUState *cs)
465 {
466 MIPSCPU *cpu = MIPS_CPU(cs);
467 CPUMIPSState *env = &cpu->env;
468 uint64_t count_ctl;
469 int err_dc, err, ret = 0;
470
471 /* check the timer is frozen */
472 err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
473 if (err_dc < 0) {
474 DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
475 ret = err_dc;
476 } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
477 /* freeze timer (sets COUNT_RESUME for us) */
478 count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
479 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
480 if (err < 0) {
481 DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
482 ret = err;
483 }
484 }
485
486 /* load CP0_Cause */
487 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
488 if (err < 0) {
489 DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
490 ret = err;
491 }
492
493 /* load CP0_Count */
494 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
495 if (err < 0) {
496 DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
497 ret = err;
498 }
499
500 /* resume KVM timer */
501 if (err_dc >= 0) {
502 count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
503 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
504 if (err < 0) {
505 DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
506 ret = err;
507 }
508 }
509
510 return ret;
511 }
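/*
 * COUNT_CTL.DC acts as a master disable of the guest CP0_Count while state
 * is moved, and COUNT_RESUME records the point in time from which counting
 * resumes; kvm_mips_update_state() below feeds it the current
 * QEMU_CLOCK_REALTIME value when the VM is restarted.
 */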
512
513 /*
514 * Handle the VM clock being started or stopped
515 */
516 static void kvm_mips_update_state(void *opaque, int running, RunState state)
517 {
518 CPUState *cs = opaque;
519 int ret;
520 uint64_t count_resume;
521
522 /*
523 * If state is already dirty (synced to QEMU) then the KVM timer state is
524 * already saved and can be restored when it is synced back to KVM.
525 */
526 if (!running) {
527 if (!cs->vcpu_dirty) {
528 ret = kvm_mips_save_count(cs);
529 if (ret < 0) {
530 warn_report("Failed saving count");
531 }
532 }
533 } else {
534 /* Set clock restore time to now */
535 count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
536 ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
537 &count_resume);
538 if (ret < 0) {
539 warn_report("Failed setting COUNT_RESUME");
540 return;
541 }
542
543 if (!cs->vcpu_dirty) {
544 ret = kvm_mips_restore_count(cs);
545 if (ret < 0) {
546 warn_report("Failed restoring count");
547 }
548 }
549 }
550 }
551
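/*
 * FPU/MSA state transfer.  Config1.FP gates the FPU state and Config3.MSAP
 * gates the MSA state; Status.FR selects whether the FPRs are transferred
 * as 64-bit registers or as 32-bit halves.  The FCR_IR and MSA_IR ID
 * registers are essentially constant, so they are only written on a full
 * state sync.
 */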
552 static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
553 {
554 MIPSCPU *cpu = MIPS_CPU(cs);
555 CPUMIPSState *env = &cpu->env;
556 int err, ret = 0;
557 unsigned int i;
558
559 /* Only put FPU state if we're emulating a CPU with an FPU */
560 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
561 /* FPU Control Registers */
562 if (level == KVM_PUT_FULL_STATE) {
563 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
564 &env->active_fpu.fcr0);
565 if (err < 0) {
566 DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
567 ret = err;
568 }
569 }
570 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
571 &env->active_fpu.fcr31);
572 if (err < 0) {
573 DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
574 ret = err;
575 }
576
577 /*
578 * FPU register state is a subset of MSA vector state, so don't put FPU
579 * registers if we're emulating a CPU with MSA.
580 */
581 if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
582 /* Floating point registers */
583 for (i = 0; i < 32; ++i) {
584 if (env->CP0_Status & (1 << CP0St_FR)) {
585 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
586 &env->active_fpu.fpr[i].d);
587 } else {
588                     err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
589 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
590 }
591 if (err < 0) {
592 DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
593 ret = err;
594 }
595 }
596 }
597 }
598
599 /* Only put MSA state if we're emulating a CPU with MSA */
600 if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
601 /* MSA Control Registers */
602 if (level == KVM_PUT_FULL_STATE) {
603 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
604 &env->msair);
605 if (err < 0) {
606 DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
607 ret = err;
608 }
609 }
610 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
611 &env->active_tc.msacsr);
612 if (err < 0) {
613 DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
614 ret = err;
615 }
616
617 /* Vector registers (includes FP registers) */
618 for (i = 0; i < 32; ++i) {
619 /* Big endian MSA not supported by QEMU yet anyway */
620 err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
621 env->active_fpu.fpr[i].wr.d);
622 if (err < 0) {
623 DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
624 ret = err;
625 }
626 }
627 }
628
629 return ret;
630 }
631
632 static int kvm_mips_get_fpu_registers(CPUState *cs)
633 {
634 MIPSCPU *cpu = MIPS_CPU(cs);
635 CPUMIPSState *env = &cpu->env;
636 int err, ret = 0;
637 unsigned int i;
638
639 /* Only get FPU state if we're emulating a CPU with an FPU */
640 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
641 /* FPU Control Registers */
642 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
643 &env->active_fpu.fcr0);
644 if (err < 0) {
645 DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
646 ret = err;
647 }
648 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
649 &env->active_fpu.fcr31);
650 if (err < 0) {
651 DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
652 ret = err;
653 } else {
654 restore_fp_status(env);
655 }
656
657 /*
658 * FPU register state is a subset of MSA vector state, so don't save FPU
659 * registers if we're emulating a CPU with MSA.
660 */
661 if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
662 /* Floating point registers */
663 for (i = 0; i < 32; ++i) {
664 if (env->CP0_Status & (1 << CP0St_FR)) {
665 err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
666 &env->active_fpu.fpr[i].d);
667 } else {
668 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
669 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
670 }
671 if (err < 0) {
672 DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
673 ret = err;
674 }
675 }
676 }
677 }
678
679 /* Only get MSA state if we're emulating a CPU with MSA */
680 if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
681 /* MSA Control Registers */
682 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
683 &env->msair);
684 if (err < 0) {
685 DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
686 ret = err;
687 }
688 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
689 &env->active_tc.msacsr);
690 if (err < 0) {
691 DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
692 ret = err;
693 } else {
694 restore_msa_fp_status(env);
695 }
696
697 /* Vector registers (includes FP registers) */
698 for (i = 0; i < 32; ++i) {
699 /* Big endian MSA not supported by QEMU yet anyway */
700 err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
701 env->active_fpu.fpr[i].wr.d);
702 if (err < 0) {
703 DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
704 ret = err;
705 }
706 }
707 }
708
709 return ret;
710 }
711
712
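/*
 * CP0 state transfer.  Most registers are copied verbatim through the
 * one-reg accessors.  The Config0..5 registers go through
 * kvm_mips_change_one_reg() with the masks defined above so only bits QEMU
 * may legitimately change are touched.  Count and Cause are handled by
 * kvm_mips_save_count()/kvm_mips_restore_count() to keep the timer
 * consistent, and are skipped here while the VM clock is stopped.
 */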
713 static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
714 {
715 MIPSCPU *cpu = MIPS_CPU(cs);
716 CPUMIPSState *env = &cpu->env;
717 int err, ret = 0;
718
719 (void)level;
720
721 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
722 if (err < 0) {
723 DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
724 ret = err;
725 }
726 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
727 &env->CP0_Context);
728 if (err < 0) {
729 DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
730 ret = err;
731 }
732 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
733 &env->active_tc.CP0_UserLocal);
734 if (err < 0) {
735 DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
736 ret = err;
737 }
738 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
739 &env->CP0_PageMask);
740 if (err < 0) {
741 DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
742 ret = err;
743 }
744 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
745 if (err < 0) {
746 DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
747 ret = err;
748 }
749 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
750 if (err < 0) {
751 DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
752 ret = err;
753 }
754 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
755 &env->CP0_BadVAddr);
756 if (err < 0) {
757 DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
758 ret = err;
759 }
760
761 /* If VM clock stopped then state will be restored when it is restarted */
762 if (runstate_is_running()) {
763 err = kvm_mips_restore_count(cs);
764 if (err < 0) {
765 ret = err;
766 }
767 }
768
769 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
770 &env->CP0_EntryHi);
771 if (err < 0) {
772 DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
773 ret = err;
774 }
775 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
776 &env->CP0_Compare);
777 if (err < 0) {
778 DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
779 ret = err;
780 }
781 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
782 if (err < 0) {
783 DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
784 ret = err;
785 }
786 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
787 if (err < 0) {
788 DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
789 ret = err;
790 }
791 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
792 if (err < 0) {
793 DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
794 ret = err;
795 }
796 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
797 &env->CP0_Config0,
798 KVM_REG_MIPS_CP0_CONFIG_MASK);
799 if (err < 0) {
800 DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
801 ret = err;
802 }
803 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
804 &env->CP0_Config1,
805 KVM_REG_MIPS_CP0_CONFIG1_MASK);
806 if (err < 0) {
807 DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
808 ret = err;
809 }
810 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
811 &env->CP0_Config2,
812 KVM_REG_MIPS_CP0_CONFIG2_MASK);
813 if (err < 0) {
814 DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
815 ret = err;
816 }
817 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
818 &env->CP0_Config3,
819 KVM_REG_MIPS_CP0_CONFIG3_MASK);
820 if (err < 0) {
821 DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
822 ret = err;
823 }
824 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
825 &env->CP0_Config4,
826 KVM_REG_MIPS_CP0_CONFIG4_MASK);
827 if (err < 0) {
828 DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
829 ret = err;
830 }
831 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
832 &env->CP0_Config5,
833 KVM_REG_MIPS_CP0_CONFIG5_MASK);
834 if (err < 0) {
835 DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
836 ret = err;
837 }
838 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
839 &env->CP0_ErrorEPC);
840 if (err < 0) {
841 DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
842 ret = err;
843 }
844
845 return ret;
846 }
847
848 static int kvm_mips_get_cp0_registers(CPUState *cs)
849 {
850 MIPSCPU *cpu = MIPS_CPU(cs);
851 CPUMIPSState *env = &cpu->env;
852 int err, ret = 0;
853
854 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
855 if (err < 0) {
856 DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
857 ret = err;
858 }
859 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
860 &env->CP0_Context);
861 if (err < 0) {
862 DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
863 ret = err;
864 }
865 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
866 &env->active_tc.CP0_UserLocal);
867 if (err < 0) {
868 DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
869 ret = err;
870 }
871 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
872 &env->CP0_PageMask);
873 if (err < 0) {
874 DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
875 ret = err;
876 }
877 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
878 if (err < 0) {
879 DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
880 ret = err;
881 }
882 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
883 if (err < 0) {
884 DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
885 ret = err;
886 }
887 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
888 &env->CP0_BadVAddr);
889 if (err < 0) {
890 DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
891 ret = err;
892 }
893 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
894 &env->CP0_EntryHi);
895 if (err < 0) {
896 DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
897 ret = err;
898 }
899 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
900 &env->CP0_Compare);
901 if (err < 0) {
902 DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
903 ret = err;
904 }
905 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
906 if (err < 0) {
907 DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
908 ret = err;
909 }
910
911 /* If VM clock stopped then state was already saved when it was stopped */
912 if (runstate_is_running()) {
913 err = kvm_mips_save_count(cs);
914 if (err < 0) {
915 ret = err;
916 }
917 }
918
919 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
920 if (err < 0) {
921 DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
922 ret = err;
923 }
924 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
925 if (err < 0) {
926 DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
927 ret = err;
928 }
929 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
930 if (err < 0) {
931 DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
932 ret = err;
933 }
934 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
935 if (err < 0) {
936 DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
937 ret = err;
938 }
939 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
940 if (err < 0) {
941 DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
942 ret = err;
943 }
944 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
945 if (err < 0) {
946 DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
947 ret = err;
948 }
949 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
950 if (err < 0) {
951 DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
952 ret = err;
953 }
954 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
955 if (err < 0) {
956 DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
957 ret = err;
958 }
959 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
960 &env->CP0_ErrorEPC);
961 if (err < 0) {
962 DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
963 ret = err;
964 }
965
966 return ret;
967 }
968
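/*
 * Full register sync entry points.  The GPRs, HI/LO and PC travel through
 * struct kvm_regs via KVM_SET_REGS/KVM_GET_REGS, sign-extended from
 * target_long into the 64-bit fields; CP0 and FPU/MSA state go through the
 * one-reg helpers above.
 */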
969 int kvm_arch_put_registers(CPUState *cs, int level)
970 {
971 MIPSCPU *cpu = MIPS_CPU(cs);
972 CPUMIPSState *env = &cpu->env;
973 struct kvm_regs regs;
974 int ret;
975 int i;
976
977 /* Set the registers based on QEMU's view of things */
978 for (i = 0; i < 32; i++) {
979 regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
980 }
981
982 regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
983 regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
984 regs.pc = (int64_t)(target_long)env->active_tc.PC;
985
986 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
987
988 if (ret < 0) {
989 return ret;
990 }
991
992 ret = kvm_mips_put_cp0_registers(cs, level);
993 if (ret < 0) {
994 return ret;
995 }
996
997 ret = kvm_mips_put_fpu_registers(cs, level);
998 if (ret < 0) {
999 return ret;
1000 }
1001
1002 return ret;
1003 }
1004
1005 int kvm_arch_get_registers(CPUState *cs)
1006 {
1007 MIPSCPU *cpu = MIPS_CPU(cs);
1008 CPUMIPSState *env = &cpu->env;
1009 int ret = 0;
1010 struct kvm_regs regs;
1011 int i;
1012
1013     /* Get the current register set as KVM sees it */
1014 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1015
1016 if (ret < 0) {
1017 return ret;
1018 }
1019
1020 for (i = 0; i < 32; i++) {
1021 env->active_tc.gpr[i] = regs.gpr[i];
1022 }
1023
1024 env->active_tc.HI[0] = regs.hi;
1025 env->active_tc.LO[0] = regs.lo;
1026 env->active_tc.PC = regs.pc;
1027
1028 kvm_mips_get_cp0_registers(cs);
1029 kvm_mips_get_fpu_registers(cs);
1030
1031 return ret;
1032 }
1033
1034 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1035 uint64_t address, uint32_t data, PCIDevice *dev)
1036 {
1037 return 0;
1038 }
1039
1040 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
1041 int vector, PCIDevice *dev)
1042 {
1043 return 0;
1044 }
1045
1046 int kvm_arch_release_virq_post(int virq)
1047 {
1048 return 0;
1049 }
1050
1051 int kvm_arch_msi_data_to_gsi(uint32_t data)
1052 {
1053 abort();
1054 }