target/mips/kvm.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include "qemu/osdep.h"
13 #include <sys/ioctl.h>
14
15 #include <linux/kvm.h>
16
17 #include "cpu.h"
18 #include "internal.h"
19 #include "qemu/error-report.h"
20 #include "qemu/main-loop.h"
21 #include "sysemu/kvm.h"
22 #include "sysemu/kvm_int.h"
23 #include "sysemu/runstate.h"
24 #include "kvm_mips.h"
25 #include "hw/boards.h"
26 #include "fpu_helper.h"
27
28 #define DEBUG_KVM 0
29
30 #define DPRINTF(fmt, ...) \
31 do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
32
33 static int kvm_mips_fpu_cap;
34 static int kvm_mips_msa_cap;
35
36 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
37 KVM_CAP_LAST_INFO
38 };
39
40 static void kvm_mips_update_state(void *opaque, bool running, RunState state);
41
42 unsigned long kvm_arch_vcpu_id(CPUState *cs)
43 {
44 return cs->cpu_index;
45 }
46
47 int kvm_arch_init(MachineState *ms, KVMState *s)
48 {
49 /* MIPS has 128 signals, so the sigmask length passed to KVM is 128 / 8 = 16 bytes */
50 kvm_set_sigmask_len(s, 16);
51
52 kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
53 kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
54
55 DPRINTF("%s\n", __func__);
56 return 0;
57 }
58
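/*
 * No in-kernel irqchip is created for MIPS; interrupts are delivered to the
 * guest with the KVM_INTERRUPT vcpu ioctl instead (see kvm_arch_pre_run()
 * and kvm_mips_set_interrupt() below).
 */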
59 int kvm_arch_irqchip_create(KVMState *s)
60 {
61 return 0;
62 }
63
64 int kvm_arch_init_vcpu(CPUState *cs)
65 {
66 MIPSCPU *cpu = MIPS_CPU(cs);
67 CPUMIPSState *env = &cpu->env;
68 int ret = 0;
69
70 qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
71
72 if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
73 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
74 if (ret < 0) {
75 /* mark unsupported so it gets disabled on reset */
76 kvm_mips_fpu_cap = 0;
77 ret = 0;
78 }
79 }
80
81 if (kvm_mips_msa_cap && ase_msa_available(env)) {
82 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
83 if (ret < 0) {
84 /* mark unsupported so it gets disabled on reset */
85 kvm_mips_msa_cap = 0;
86 ret = 0;
87 }
88 }
89
90 DPRINTF("%s\n", __func__);
91 return ret;
92 }
93
94 int kvm_arch_destroy_vcpu(CPUState *cs)
95 {
96 return 0;
97 }
98
99 void kvm_mips_reset_vcpu(MIPSCPU *cpu)
100 {
101 CPUMIPSState *env = &cpu->env;
102
103 if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
104 warn_report("KVM does not support FPU, disabling");
105 env->CP0_Config1 &= ~(1 << CP0C1_FP);
106 }
107 if (!kvm_mips_msa_cap && ase_msa_available(env)) {
108 warn_report("KVM does not support MSA, disabling");
109 env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
110 }
111
112 DPRINTF("%s\n", __func__);
113 }
114
115 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
116 {
117 DPRINTF("%s\n", __func__);
118 return 0;
119 }
120
121 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
122 {
123 DPRINTF("%s\n", __func__);
124 return 0;
125 }
126
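/*
 * Check whether CP0_Cause has the IP2 bit set, i.e. whether the first
 * hardware interrupt line is pending and needs to be injected into KVM.
 */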
127 static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
128 {
129 CPUMIPSState *env = &cpu->env;
130
131 return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
132 }
133
134
135 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
136 {
137 MIPSCPU *cpu = MIPS_CPU(cs);
138 int r;
139 struct kvm_mips_interrupt intr;
140
141 qemu_mutex_lock_iothread();
142
143 if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
144 cpu_mips_io_interrupts_pending(cpu)) {
145 intr.cpu = -1;
146 intr.irq = 2;
147 r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
148 if (r < 0) {
149 error_report("%s: cpu %d: failed to inject IRQ %x",
150 __func__, cs->cpu_index, intr.irq);
151 }
152 }
153
154 qemu_mutex_unlock_iothread();
155 }
156
157 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
158 {
159 return MEMTXATTRS_UNSPECIFIED;
160 }
161
162 int kvm_arch_process_async_events(CPUState *cs)
163 {
164 return cs->halted;
165 }
166
167 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
168 {
169 int ret;
170
171 DPRINTF("%s\n", __func__);
172 switch (run->exit_reason) {
173 default:
174 error_report("%s: unknown exit reason %d",
175 __func__, run->exit_reason);
176 ret = -1;
177 break;
178 }
179
180 return ret;
181 }
182
183 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
184 {
185 DPRINTF("%s\n", __func__);
186 return true;
187 }
188
189 void kvm_arch_init_irq_routing(KVMState *s)
190 {
191 }
192
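/*
 * Assert or deassert a guest interrupt line on the vcpu itself; the
 * KVM_INTERRUPT ioctl encodes a deassert as a negative irq number.
 */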
193 int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
194 {
195 CPUState *cs = CPU(cpu);
196 struct kvm_mips_interrupt intr;
197
198 assert(kvm_enabled());
199
200 intr.cpu = -1;
201
202 if (level) {
203 intr.irq = irq;
204 } else {
205 intr.irq = -irq;
206 }
207
208 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
209
210 return 0;
211 }
212
213 int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
214 {
215 CPUState *cs = current_cpu;
216 CPUState *dest_cs = CPU(cpu);
217 struct kvm_mips_interrupt intr;
218
219 assert(kvm_enabled());
220
221 intr.cpu = dest_cs->cpu_index;
222
223 if (level) {
224 intr.irq = irq;
225 } else {
226 intr.irq = -irq;
227 }
228
229 DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
230
231 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
232
233 return 0;
234 }
235
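/*
 * KVM "one reg" IDs for CP0 registers: the index encodes (reg * 8 + sel),
 * OR'd with the KVM_REG_MIPS_CP0 class and a 32-bit or 64-bit size flag.
 */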
236 #define MIPS_CP0_32(_R, _S) \
237 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
238
239 #define MIPS_CP0_64(_R, _S) \
240 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
241
242 #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
243 #define KVM_REG_MIPS_CP0_RANDOM MIPS_CP0_32(1, 0)
244 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
245 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
246 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
247 #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
248 #define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
249 #define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
250 #define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
251 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
252 #define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
253 #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
254 #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
255 #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
256 #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
257 #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
258 #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
259 #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
260 #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
261 #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
262 #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
263 #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
264 #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
265 #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
266 #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
267 #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
268 #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
269 #define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
270 #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
271 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
272 #define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
273 #define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
274 #define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
275 #define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
276 #define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
277 #define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
278
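/*
 * Helpers around KVM_SET_ONE_REG/KVM_GET_ONE_REG for the register widths
 * used below. The *_ulreg variants copy target_ulong values through a
 * 64-bit temporary so the same code works on 32-bit and 64-bit targets.
 */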
279 static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
280 int32_t *addr)
281 {
282 struct kvm_one_reg cp0reg = {
283 .id = reg_id,
284 .addr = (uintptr_t)addr
285 };
286
287 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
288 }
289
290 static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
291 uint32_t *addr)
292 {
293 struct kvm_one_reg cp0reg = {
294 .id = reg_id,
295 .addr = (uintptr_t)addr
296 };
297
298 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
299 }
300
301 static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
302 target_ulong *addr)
303 {
304 uint64_t val64 = *addr;
305 struct kvm_one_reg cp0reg = {
306 .id = reg_id,
307 .addr = (uintptr_t)&val64
308 };
309
310 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
311 }
312
313 static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
314 int64_t *addr)
315 {
316 struct kvm_one_reg cp0reg = {
317 .id = reg_id,
318 .addr = (uintptr_t)addr
319 };
320
321 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
322 }
323
324 static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
325 uint64_t *addr)
326 {
327 struct kvm_one_reg cp0reg = {
328 .id = reg_id,
329 .addr = (uintptr_t)addr
330 };
331
332 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
333 }
334
335 static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
336 int32_t *addr)
337 {
338 struct kvm_one_reg cp0reg = {
339 .id = reg_id,
340 .addr = (uintptr_t)addr
341 };
342
343 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
344 }
345
346 static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
347 uint32_t *addr)
348 {
349 struct kvm_one_reg cp0reg = {
350 .id = reg_id,
351 .addr = (uintptr_t)addr
352 };
353
354 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
355 }
356
357 static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
358 target_ulong *addr)
359 {
360 int ret;
361 uint64_t val64 = 0;
362 struct kvm_one_reg cp0reg = {
363 .id = reg_id,
364 .addr = (uintptr_t)&val64
365 };
366
367 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
368 if (ret >= 0) {
369 *addr = val64;
370 }
371 return ret;
372 }
373
374 static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
375 int64_t *addr)
376 {
377 struct kvm_one_reg cp0reg = {
378 .id = reg_id,
379 .addr = (uintptr_t)addr
380 };
381
382 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
383 }
384
385 static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
386 uint64_t *addr)
387 {
388 struct kvm_one_reg cp0reg = {
389 .id = reg_id,
390 .addr = (uintptr_t)addr
391 };
392
393 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
394 }
395
396 #define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M)
397 #define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \
398 (1U << CP0C1_FP))
399 #define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M)
400 #define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \
401 (1U << CP0C3_MSAP))
402 #define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M)
403 #define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \
404 (1U << CP0C5_UFE) | \
405 (1U << CP0C5_FRE) | \
406 (1U << CP0C5_UFR))
407 #define KVM_REG_MIPS_CP0_CONFIG6_MASK ((1U << CP0C6_BPPASS) | \
408 (0x3fU << CP0C6_KPOS) | \
409 (1U << CP0C6_KE) | \
410 (1U << CP0C6_VTLBONLY) | \
411 (1U << CP0C6_LASX) | \
412 (1U << CP0C6_SSEN) | \
413 (1U << CP0C6_DISDRTIME) | \
414 (1U << CP0C6_PIXNUEN) | \
415 (1U << CP0C6_SCRAND) | \
416 (1U << CP0C6_LLEXCEN) | \
417 (1U << CP0C6_DISVC) | \
418 (1U << CP0C6_VCLRU) | \
419 (1U << CP0C6_DCLRU) | \
420 (1U << CP0C6_PIXUEN) | \
421 (1U << CP0C6_DISBLKLYEN) | \
422 (1U << CP0C6_UMEMUALEN) | \
423 (1U << CP0C6_SFBEN) | \
424 (1U << CP0C6_FLTINT) | \
425 (1U << CP0C6_VLTINT) | \
426 (1U << CP0C6_DISBTB) | \
427 (3U << CP0C6_STPREFCTL) | \
428 (1U << CP0C6_INSTPREF) | \
429 (1U << CP0C6_DATAPREF))
430
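/*
 * Read-modify-write a CP0 register: fetch KVM's current value and write it
 * back with only the bits covered by @mask replaced by QEMU's copy (*addr).
 */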
431 static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
432 int32_t *addr, int32_t mask)
433 {
434 int err;
435 int32_t tmp, change;
436
437 err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
438 if (err < 0) {
439 return err;
440 }
441
442 /* only change bits in mask */
443 change = (*addr ^ tmp) & mask;
444 if (!change) {
445 return 0;
446 }
447
448 tmp = tmp ^ change;
449 return kvm_mips_put_one_reg(cs, reg_id, &tmp);
450 }
451
452 /*
453 * We freeze the KVM timer when either the VM clock is stopped or the state is
454 * saved (the state is dirty).
455 */
456
457 /*
458 * Save the state of the KVM timer when VM clock is stopped or state is synced
459 * to QEMU.
460 */
461 static int kvm_mips_save_count(CPUState *cs)
462 {
463 MIPSCPU *cpu = MIPS_CPU(cs);
464 CPUMIPSState *env = &cpu->env;
465 uint64_t count_ctl;
466 int err, ret = 0;
467
468 /* freeze KVM timer */
469 err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
470 if (err < 0) {
471 DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
472 ret = err;
473 } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
474 count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
475 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
476 if (err < 0) {
477 DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
478 ret = err;
479 }
480 }
481
482 /* read CP0_Cause */
483 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
484 if (err < 0) {
485 DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
486 ret = err;
487 }
488
489 /* read CP0_Count */
490 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
491 if (err < 0) {
492 DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
493 ret = err;
494 }
495
496 return ret;
497 }
498
499 /*
500 * Restore the state of the KVM timer when VM clock is restarted or state is
501 * synced to KVM.
502 */
503 static int kvm_mips_restore_count(CPUState *cs)
504 {
505 MIPSCPU *cpu = MIPS_CPU(cs);
506 CPUMIPSState *env = &cpu->env;
507 uint64_t count_ctl;
508 int err_dc, err, ret = 0;
509
510 /* check the timer is frozen */
511 err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
512 if (err_dc < 0) {
513 DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
514 ret = err_dc;
515 } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
516 /* freeze timer (sets COUNT_RESUME for us) */
517 count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
518 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
519 if (err < 0) {
520 DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
521 ret = err;
522 }
523 }
524
525 /* load CP0_Cause */
526 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
527 if (err < 0) {
528 DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
529 ret = err;
530 }
531
532 /* load CP0_Count */
533 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
534 if (err < 0) {
535 DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
536 ret = err;
537 }
538
539 /* resume KVM timer */
540 if (err_dc >= 0) {
541 count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
542 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
543 if (err < 0) {
544 DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
545 ret = err;
546 }
547 }
548
549 return ret;
550 }
551
552 /*
553 * Handle the VM clock being started or stopped
554 */
555 static void kvm_mips_update_state(void *opaque, bool running, RunState state)
556 {
557 CPUState *cs = opaque;
558 int ret;
559 uint64_t count_resume;
560
561 /*
562 * If state is already dirty (synced to QEMU) then the KVM timer state is
563 * already saved and can be restored when it is synced back to KVM.
564 */
565 if (!running) {
566 if (!cs->vcpu_dirty) {
567 ret = kvm_mips_save_count(cs);
568 if (ret < 0) {
569 warn_report("Failed saving count");
570 }
571 }
572 } else {
573 /* Set clock restore time to now */
574 count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
575 ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
576 &count_resume);
577 if (ret < 0) {
578 warn_report("Failed setting COUNT_RESUME");
579 return;
580 }
581
582 if (!cs->vcpu_dirty) {
583 ret = kvm_mips_restore_count(cs);
584 if (ret < 0) {
585 warn_report("Failed restoring count");
586 }
587 }
588 }
589 }
590
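/*
 * Copy QEMU's FPU and MSA register state into KVM. The FIR (FCR_IR) and
 * MSAIR control registers are only written on a full state sync
 * (level == KVM_PUT_FULL_STATE).
 */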
591 static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
592 {
593 MIPSCPU *cpu = MIPS_CPU(cs);
594 CPUMIPSState *env = &cpu->env;
595 int err, ret = 0;
596 unsigned int i;
597
598 /* Only put FPU state if we're emulating a CPU with an FPU */
599 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
600 /* FPU Control Registers */
601 if (level == KVM_PUT_FULL_STATE) {
602 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
603 &env->active_fpu.fcr0);
604 if (err < 0) {
605 DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
606 ret = err;
607 }
608 }
609 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
610 &env->active_fpu.fcr31);
611 if (err < 0) {
612 DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
613 ret = err;
614 }
615
616 /*
617 * FPU register state is a subset of MSA vector state, so don't put FPU
618 * registers if we're emulating a CPU with MSA.
619 */
620 if (!ase_msa_available(env)) {
621 /* Floating point registers */
622 for (i = 0; i < 32; ++i) {
623 if (env->CP0_Status & (1 << CP0St_FR)) {
624 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
625 &env->active_fpu.fpr[i].d);
626 } else {
627 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
628 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
629 }
630 if (err < 0) {
631 DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
632 ret = err;
633 }
634 }
635 }
636 }
637
638 /* Only put MSA state if we're emulating a CPU with MSA */
639 if (ase_msa_available(env)) {
640 /* MSA Control Registers */
641 if (level == KVM_PUT_FULL_STATE) {
642 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
643 &env->msair);
644 if (err < 0) {
645 DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
646 ret = err;
647 }
648 }
649 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
650 &env->active_tc.msacsr);
651 if (err < 0) {
652 DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
653 ret = err;
654 }
655
656 /* Vector registers (includes FP registers) */
657 for (i = 0; i < 32; ++i) {
658 /* Big endian MSA not supported by QEMU yet anyway */
659 err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
660 env->active_fpu.fpr[i].wr.d);
661 if (err < 0) {
662 DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
663 ret = err;
664 }
665 }
666 }
667
668 return ret;
669 }
670
671 static int kvm_mips_get_fpu_registers(CPUState *cs)
672 {
673 MIPSCPU *cpu = MIPS_CPU(cs);
674 CPUMIPSState *env = &cpu->env;
675 int err, ret = 0;
676 unsigned int i;
677
678 /* Only get FPU state if we're emulating a CPU with an FPU */
679 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
680 /* FPU Control Registers */
681 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
682 &env->active_fpu.fcr0);
683 if (err < 0) {
684 DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
685 ret = err;
686 }
687 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
688 &env->active_fpu.fcr31);
689 if (err < 0) {
690 DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
691 ret = err;
692 } else {
693 restore_fp_status(env);
694 }
695
696 /*
697 * FPU register state is a subset of MSA vector state, so don't save FPU
698 * registers if we're emulating a CPU with MSA.
699 */
700 if (!ase_msa_available(env)) {
701 /* Floating point registers */
702 for (i = 0; i < 32; ++i) {
703 if (env->CP0_Status & (1 << CP0St_FR)) {
704 err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
705 &env->active_fpu.fpr[i].d);
706 } else {
707 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
708 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
709 }
710 if (err < 0) {
711 DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
712 ret = err;
713 }
714 }
715 }
716 }
717
718 /* Only get MSA state if we're emulating a CPU with MSA */
719 if (ase_msa_available(env)) {
720 /* MSA Control Registers */
721 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
722 &env->msair);
723 if (err < 0) {
724 DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
725 ret = err;
726 }
727 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
728 &env->active_tc.msacsr);
729 if (err < 0) {
730 DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
731 ret = err;
732 } else {
733 restore_msa_fp_status(env);
734 }
735
736 /* Vector registers (includes FP registers) */
737 for (i = 0; i < 32; ++i) {
738 /* Big endian MSA not supported by QEMU yet anyway */
739 err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
740 env->active_fpu.fpr[i].wr.d);
741 if (err < 0) {
742 DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
743 ret = err;
744 }
745 }
746 }
747
748 return ret;
749 }
750
751
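/*
 * Copy QEMU's CP0 state into KVM. The Config registers go through
 * kvm_mips_change_one_reg() so that only the writable bits selected by the
 * KVM_REG_MIPS_CP0_CONFIGn_MASK masks are changed.
 */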
752 static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
753 {
754 MIPSCPU *cpu = MIPS_CPU(cs);
755 CPUMIPSState *env = &cpu->env;
756 int err, ret = 0;
757
758 (void)level;
759
760 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
761 if (err < 0) {
762 DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
763 ret = err;
764 }
765 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
766 if (err < 0) {
767 DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
768 ret = err;
769 }
770 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
771 &env->CP0_Context);
772 if (err < 0) {
773 DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
774 ret = err;
775 }
776 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
777 &env->active_tc.CP0_UserLocal);
778 if (err < 0) {
779 DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
780 ret = err;
781 }
782 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
783 &env->CP0_PageMask);
784 if (err < 0) {
785 DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
786 ret = err;
787 }
788 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
789 &env->CP0_PageGrain);
790 if (err < 0) {
791 DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
792 ret = err;
793 }
794 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
795 &env->CP0_PWBase);
796 if (err < 0) {
797 DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
798 ret = err;
799 }
800 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
801 &env->CP0_PWField);
802 if (err < 0) {
803 DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
804 ret = err;
805 }
806 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
807 &env->CP0_PWSize);
808 if (err < 0) {
809 DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
810 ret = err;
811 }
812 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
813 if (err < 0) {
814 DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
815 ret = err;
816 }
817 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
818 if (err < 0) {
819 DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
820 ret = err;
821 }
822 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
823 if (err < 0) {
824 DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
825 ret = err;
826 }
827 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
828 &env->CP0_BadVAddr);
829 if (err < 0) {
830 DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
831 ret = err;
832 }
833
834 /* If VM clock stopped then state will be restored when it is restarted */
835 if (runstate_is_running()) {
836 err = kvm_mips_restore_count(cs);
837 if (err < 0) {
838 ret = err;
839 }
840 }
841
842 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
843 &env->CP0_EntryHi);
844 if (err < 0) {
845 DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
846 ret = err;
847 }
848 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
849 &env->CP0_Compare);
850 if (err < 0) {
851 DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
852 ret = err;
853 }
854 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
855 if (err < 0) {
856 DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
857 ret = err;
858 }
859 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
860 if (err < 0) {
861 DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
862 ret = err;
863 }
864 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
865 if (err < 0) {
866 DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
867 ret = err;
868 }
869 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
870 if (err < 0) {
871 DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
872 ret = err;
873 }
874 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
875 &env->CP0_Config0,
876 KVM_REG_MIPS_CP0_CONFIG_MASK);
877 if (err < 0) {
878 DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
879 ret = err;
880 }
881 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
882 &env->CP0_Config1,
883 KVM_REG_MIPS_CP0_CONFIG1_MASK);
884 if (err < 0) {
885 DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
886 ret = err;
887 }
888 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
889 &env->CP0_Config2,
890 KVM_REG_MIPS_CP0_CONFIG2_MASK);
891 if (err < 0) {
892 DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
893 ret = err;
894 }
895 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
896 &env->CP0_Config3,
897 KVM_REG_MIPS_CP0_CONFIG3_MASK);
898 if (err < 0) {
899 DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
900 ret = err;
901 }
902 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
903 &env->CP0_Config4,
904 KVM_REG_MIPS_CP0_CONFIG4_MASK);
905 if (err < 0) {
906 DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
907 ret = err;
908 }
909 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
910 &env->CP0_Config5,
911 KVM_REG_MIPS_CP0_CONFIG5_MASK);
912 if (err < 0) {
913 DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
914 ret = err;
915 }
916 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
917 &env->CP0_Config6,
918 KVM_REG_MIPS_CP0_CONFIG6_MASK);
919 if (err < 0) {
920 DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
921 ret = err;
922 }
923 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
924 &env->CP0_XContext);
925 if (err < 0) {
926 DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
927 ret = err;
928 }
929 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
930 &env->CP0_ErrorEPC);
931 if (err < 0) {
932 DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
933 ret = err;
934 }
935 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
936 &env->CP0_KScratch[0]);
937 if (err < 0) {
938 DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
939 ret = err;
940 }
941 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
942 &env->CP0_KScratch[1]);
943 if (err < 0) {
944 DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
945 ret = err;
946 }
947 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
948 &env->CP0_KScratch[2]);
949 if (err < 0) {
950 DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
951 ret = err;
952 }
953 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
954 &env->CP0_KScratch[3]);
955 if (err < 0) {
956 DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
957 ret = err;
958 }
959 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
960 &env->CP0_KScratch[4]);
961 if (err < 0) {
962 DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
963 ret = err;
964 }
965 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
966 &env->CP0_KScratch[5]);
967 if (err < 0) {
968 DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
969 ret = err;
970 }
971
972 return ret;
973 }
974
975 static int kvm_mips_get_cp0_registers(CPUState *cs)
976 {
977 MIPSCPU *cpu = MIPS_CPU(cs);
978 CPUMIPSState *env = &cpu->env;
979 int err, ret = 0;
980
981 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
982 if (err < 0) {
983 DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
984 ret = err;
985 }
986 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
987 if (err < 0) {
988 DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
989 ret = err;
990 }
991 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
992 &env->CP0_Context);
993 if (err < 0) {
994 DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
995 ret = err;
996 }
997 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
998 &env->active_tc.CP0_UserLocal);
999 if (err < 0) {
1000 DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
1001 ret = err;
1002 }
1003 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
1004 &env->CP0_PageMask);
1005 if (err < 0) {
1006 DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
1007 ret = err;
1008 }
1009 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
1010 &env->CP0_PageGrain);
1011 if (err < 0) {
1012 DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
1013 ret = err;
1014 }
1015 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
1016 &env->CP0_PWBase);
1017 if (err < 0) {
1018 DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
1019 ret = err;
1020 }
1021 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
1022 &env->CP0_PWField);
1023 if (err < 0) {
1024 DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
1025 ret = err;
1026 }
1027 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
1028 &env->CP0_PWSize);
1029 if (err < 0) {
1030 DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
1031 ret = err;
1032 }
1033 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
1034 if (err < 0) {
1035 DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
1036 ret = err;
1037 }
1038 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
1039 if (err < 0) {
1040 DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
1041 ret = err;
1042 }
1043 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
1044 if (err < 0) {
1045 DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
1046 ret = err;
1047 }
1048 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
1049 &env->CP0_BadVAddr);
1050 if (err < 0) {
1051 DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
1052 ret = err;
1053 }
1054 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
1055 &env->CP0_EntryHi);
1056 if (err < 0) {
1057 DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
1058 ret = err;
1059 }
1060 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
1061 &env->CP0_Compare);
1062 if (err < 0) {
1063 DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
1064 ret = err;
1065 }
1066 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
1067 if (err < 0) {
1068 DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
1069 ret = err;
1070 }
1071
1072 /* If VM clock stopped then state was already saved when it was stopped */
1073 if (runstate_is_running()) {
1074 err = kvm_mips_save_count(cs);
1075 if (err < 0) {
1076 ret = err;
1077 }
1078 }
1079
1080 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
1081 if (err < 0) {
1082 DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
1083 ret = err;
1084 }
1085 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
1086 if (err < 0) {
1087 DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
1088 ret = err;
1089 }
1090 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
1091 if (err < 0) {
1092 DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
1093 ret = err;
1094 }
1095 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
1096 if (err < 0) {
1097 DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
1098 ret = err;
1099 }
1100 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
1101 if (err < 0) {
1102 DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
1103 ret = err;
1104 }
1105 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
1106 if (err < 0) {
1107 DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
1108 ret = err;
1109 }
1110 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
1111 if (err < 0) {
1112 DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
1113 ret = err;
1114 }
1115 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
1116 if (err < 0) {
1117 DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
1118 ret = err;
1119 }
1120 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
1121 if (err < 0) {
1122 DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
1123 ret = err;
1124 }
1125 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
1126 if (err < 0) {
1127 DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
1128 ret = err;
1129 }
1130 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
1131 &env->CP0_XContext);
1132 if (err < 0) {
1133 DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
1134 ret = err;
1135 }
1136 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
1137 &env->CP0_ErrorEPC);
1138 if (err < 0) {
1139 DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
1140 ret = err;
1141 }
1142 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
1143 &env->CP0_KScratch[0]);
1144 if (err < 0) {
1145 DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
1146 ret = err;
1147 }
1148 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
1149 &env->CP0_KScratch[1]);
1150 if (err < 0) {
1151 DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
1152 ret = err;
1153 }
1154 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
1155 &env->CP0_KScratch[2]);
1156 if (err < 0) {
1157 DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
1158 ret = err;
1159 }
1160 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
1161 &env->CP0_KScratch[3]);
1162 if (err < 0) {
1163 DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
1164 ret = err;
1165 }
1166 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
1167 &env->CP0_KScratch[4]);
1168 if (err < 0) {
1169 DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
1170 ret = err;
1171 }
1172 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
1173 &env->CP0_KScratch[5]);
1174 if (err < 0) {
1175 DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
1176 ret = err;
1177 }
1178
1179 return ret;
1180 }
1181
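/*
 * Full register sync from QEMU to KVM: general-purpose registers via
 * KVM_SET_REGS, then CP0 and FPU/MSA state via the one-reg interface.
 */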
1182 int kvm_arch_put_registers(CPUState *cs, int level)
1183 {
1184 MIPSCPU *cpu = MIPS_CPU(cs);
1185 CPUMIPSState *env = &cpu->env;
1186 struct kvm_regs regs;
1187 int ret;
1188 int i;
1189
1190 /* Set the registers based on QEMU's view of things */
1191 for (i = 0; i < 32; i++) {
1192 regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
1193 }
1194
1195 regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
1196 regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
1197 regs.pc = (int64_t)(target_long)env->active_tc.PC;
1198
1199 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1200
1201 if (ret < 0) {
1202 return ret;
1203 }
1204
1205 ret = kvm_mips_put_cp0_registers(cs, level);
1206 if (ret < 0) {
1207 return ret;
1208 }
1209
1210 ret = kvm_mips_put_fpu_registers(cs, level);
1211 if (ret < 0) {
1212 return ret;
1213 }
1214
1215 return ret;
1216 }
1217
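/*
 * Full register sync from KVM to QEMU: the inverse of
 * kvm_arch_put_registers().
 */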
1218 int kvm_arch_get_registers(CPUState *cs)
1219 {
1220 MIPSCPU *cpu = MIPS_CPU(cs);
1221 CPUMIPSState *env = &cpu->env;
1222 int ret = 0;
1223 struct kvm_regs regs;
1224 int i;
1225
1226 /* Get the current register set as KVM sees it */
1227 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1228
1229 if (ret < 0) {
1230 return ret;
1231 }
1232
1233 for (i = 0; i < 32; i++) {
1234 env->active_tc.gpr[i] = regs.gpr[i];
1235 }
1236
1237 env->active_tc.HI[0] = regs.hi;
1238 env->active_tc.LO[0] = regs.lo;
1239 env->active_tc.PC = regs.pc;
1240
1241 kvm_mips_get_cp0_registers(cs);
1242 kvm_mips_get_fpu_registers(cs);
1243
1244 return ret;
1245 }
1246
1247 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1248 uint64_t address, uint32_t data, PCIDevice *dev)
1249 {
1250 return 0;
1251 }
1252
1253 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
1254 int vector, PCIDevice *dev)
1255 {
1256 return 0;
1257 }
1258
1259 int kvm_arch_release_virq_post(int virq)
1260 {
1261 return 0;
1262 }
1263
1264 int kvm_arch_msi_data_to_gsi(uint32_t data)
1265 {
1266 abort();
1267 }
1268
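/*
 * Pick the KVM VM type: prefer hardware-assisted virtualization (VZ) when
 * KVM_CAP_MIPS_VZ is available, fall back to trap-and-emulate (TE), and
 * fail with -1 if the host supports neither.
 */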
1269 int mips_kvm_type(MachineState *machine, const char *vm_type)
1270 {
1271 #if defined(KVM_CAP_MIPS_VZ) || defined(KVM_CAP_MIPS_TE)
1272 int r;
1273 KVMState *s = KVM_STATE(machine->accelerator);
1274 #endif
1275
1276 #if defined(KVM_CAP_MIPS_VZ)
1277 r = kvm_check_extension(s, KVM_CAP_MIPS_VZ);
1278 if (r > 0) {
1279 return KVM_VM_MIPS_VZ;
1280 }
1281 #endif
1282
1283 #if defined(KVM_CAP_MIPS_TE)
1284 r = kvm_check_extension(s, KVM_CAP_MIPS_TE);
1285 if (r > 0) {
1286 return KVM_VM_MIPS_TE;
1287 }
1288 #endif
1289
1290 return -1;
1291 }
1292
1293 bool kvm_arch_cpu_check_are_resettable(void)
1294 {
1295 return true;
1296 }