target/mips/kvm.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include "qemu/osdep.h"
13 #include <sys/ioctl.h>
14
15 #include <linux/kvm.h>
16
17 #include "qemu-common.h"
18 #include "cpu.h"
19 #include "internal.h"
20 #include "qemu/error-report.h"
21 #include "qemu/main-loop.h"
22 #include "sysemu/kvm.h"
23 #include "sysemu/kvm_int.h"
24 #include "sysemu/runstate.h"
25 #include "kvm_mips.h"
26 #include "hw/boards.h"
27 #include "fpu_helper.h"
28
29 #define DEBUG_KVM 0
30
31 #define DPRINTF(fmt, ...) \
32 do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
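/* Set DEBUG_KVM to 1 to enable the DPRINTF() trace output used throughout this file. */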
33
34 static int kvm_mips_fpu_cap;
35 static int kvm_mips_msa_cap;
36
37 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
38 KVM_CAP_LAST_INFO
39 };
40
41 static void kvm_mips_update_state(void *opaque, int running, RunState state);
42
43 unsigned long kvm_arch_vcpu_id(CPUState *cs)
44 {
45 return cs->cpu_index;
46 }
47
48 int kvm_arch_init(MachineState *ms, KVMState *s)
49 {
50 /* MIPS has 128 signals, so the KVM sigmask is 128 bits (16 bytes) */
51 kvm_set_sigmask_len(s, 16);
52
53 kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
54 kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
55
56 DPRINTF("%s\n", __func__);
57 return 0;
58 }
59
60 int kvm_arch_irqchip_create(KVMState *s)
61 {
62 return 0;
63 }
64
65 int kvm_arch_init_vcpu(CPUState *cs)
66 {
67 MIPSCPU *cpu = MIPS_CPU(cs);
68 CPUMIPSState *env = &cpu->env;
69 int ret = 0;
70
71 qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
72
73 if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
74 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
75 if (ret < 0) {
76 /* mark unsupported so it gets disabled on reset */
77 kvm_mips_fpu_cap = 0;
78 ret = 0;
79 }
80 }
81
82 if (kvm_mips_msa_cap && ase_msa_available(env)) {
83 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
84 if (ret < 0) {
85 /* mark unsupported so it gets disabled on reset */
86 kvm_mips_msa_cap = 0;
87 ret = 0;
88 }
89 }
90
91 DPRINTF("%s\n", __func__);
92 return ret;
93 }
94
95 int kvm_arch_destroy_vcpu(CPUState *cs)
96 {
97 return 0;
98 }
99
100 void kvm_mips_reset_vcpu(MIPSCPU *cpu)
101 {
102 CPUMIPSState *env = &cpu->env;
103
104 if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
105 warn_report("KVM does not support FPU, disabling");
106 env->CP0_Config1 &= ~(1 << CP0C1_FP);
107 }
108 if (!kvm_mips_msa_cap && ase_msa_available(env)) {
109 warn_report("KVM does not support MSA, disabling");
110 env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
111 }
112
113 DPRINTF("%s\n", __func__);
114 }
115
116 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
117 {
118 DPRINTF("%s\n", __func__);
119 return 0;
120 }
121
122 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
123 {
124 DPRINTF("%s\n", __func__);
125 return 0;
126 }
127
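/*
 * Note: this checks CP0_Cause.IP2, the first hardware interrupt line; it is
 * the same interrupt that kvm_arch_pre_run() below injects as "irq 2" via
 * KVM_INTERRUPT when QEMU's interrupt controller raises the line.
 */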
128 static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
129 {
130 CPUMIPSState *env = &cpu->env;
131
132 return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
133 }
134
135
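/*
 * Just before re-entering KVM_RUN: if QEMU has raised the hard interrupt
 * line and Cause.IP2 is pending, (re)assert irq 2 on this vCPU via
 * KVM_INTERRUPT. The iothread lock is held because cs->interrupt_request
 * and CP0_Cause are updated from the main loop.
 */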
136 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
137 {
138 MIPSCPU *cpu = MIPS_CPU(cs);
139 int r;
140 struct kvm_mips_interrupt intr;
141
142 qemu_mutex_lock_iothread();
143
144 if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
145 cpu_mips_io_interrupts_pending(cpu)) {
146 intr.cpu = -1;
147 intr.irq = 2;
148 r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
149 if (r < 0) {
150 error_report("%s: cpu %d: failed to inject IRQ %x",
151 __func__, cs->cpu_index, intr.irq);
152 }
153 }
154
155 qemu_mutex_unlock_iothread();
156 }
157
158 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
159 {
160 return MEMTXATTRS_UNSPECIFIED;
161 }
162
163 int kvm_arch_process_async_events(CPUState *cs)
164 {
165 return cs->halted;
166 }
167
168 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
169 {
170 int ret;
171
172 DPRINTF("%s\n", __func__);
173 switch (run->exit_reason) {
174 default:
175 error_report("%s: unknown exit reason %d",
176 __func__, run->exit_reason);
177 ret = -1;
178 break;
179 }
180
181 return ret;
182 }
183
184 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
185 {
186 DPRINTF("%s\n", __func__);
187 return true;
188 }
189
190 void kvm_arch_init_irq_routing(KVMState *s)
191 {
192 }
193
194 int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
195 {
196 CPUState *cs = CPU(cpu);
197 struct kvm_mips_interrupt intr;
198
199 assert(kvm_enabled());
200
201 intr.cpu = -1;
202
203 if (level) {
204 intr.irq = irq;
205 } else {
206 intr.irq = -irq;
207 }
208
209 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
210
211 return 0;
212 }
213
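/*
 * Cross-vCPU (IPI) variant: the ioctl is still issued on the current vCPU's
 * file descriptor, but intr.cpu names the destination vCPU by index; in
 * kvm_mips_set_interrupt() above, -1 means "this vCPU" per the MIPS
 * KVM_INTERRUPT interface.
 */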
214 int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
215 {
216 CPUState *cs = current_cpu;
217 CPUState *dest_cs = CPU(cpu);
218 struct kvm_mips_interrupt intr;
219
220 assert(kvm_enabled());
221
222 intr.cpu = dest_cs->cpu_index;
223
224 if (level) {
225 intr.irq = irq;
226 } else {
227 intr.irq = -irq;
228 }
229
230 DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
231
232 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
233
234 return 0;
235 }
236
237 #define MIPS_CP0_32(_R, _S) \
238 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
239
240 #define MIPS_CP0_64(_R, _S) \
241 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
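/*
 * Example: MIPS_CP0_32(12, 0) expands to
 * KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0), i.e. CP0 register 12,
 * select 0 (Status) accessed as a 32-bit ONE_REG id; see
 * KVM_REG_MIPS_CP0_STATUS below.
 */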
242
243 #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
244 #define KVM_REG_MIPS_CP0_RANDOM MIPS_CP0_32(1, 0)
245 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
246 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
247 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
248 #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
249 #define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
250 #define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
251 #define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
252 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
253 #define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
254 #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
255 #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
256 #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
257 #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
258 #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
259 #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
260 #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
261 #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
262 #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
263 #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
264 #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
265 #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
266 #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
267 #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
268 #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
269 #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
270 #define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
271 #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
272 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
273 #define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
274 #define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
275 #define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
276 #define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
277 #define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
278 #define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
279
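/*
 * Thin wrappers around the KVM_SET_ONE_REG / KVM_GET_ONE_REG ioctls: each
 * fills a struct kvm_one_reg with the register id and the userspace address
 * of the value (cast through uintptr_t). The *_ulreg variants bounce through
 * a uint64_t because these registers are transferred as 64-bit values even
 * when target_ulong is only 32 bits wide.
 */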
280 static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
281 int32_t *addr)
282 {
283 struct kvm_one_reg cp0reg = {
284 .id = reg_id,
285 .addr = (uintptr_t)addr
286 };
287
288 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
289 }
290
291 static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
292 uint32_t *addr)
293 {
294 struct kvm_one_reg cp0reg = {
295 .id = reg_id,
296 .addr = (uintptr_t)addr
297 };
298
299 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
300 }
301
302 static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
303 target_ulong *addr)
304 {
305 uint64_t val64 = *addr;
306 struct kvm_one_reg cp0reg = {
307 .id = reg_id,
308 .addr = (uintptr_t)&val64
309 };
310
311 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
312 }
313
314 static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
315 int64_t *addr)
316 {
317 struct kvm_one_reg cp0reg = {
318 .id = reg_id,
319 .addr = (uintptr_t)addr
320 };
321
322 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
323 }
324
325 static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
326 uint64_t *addr)
327 {
328 struct kvm_one_reg cp0reg = {
329 .id = reg_id,
330 .addr = (uintptr_t)addr
331 };
332
333 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
334 }
335
336 static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
337 int32_t *addr)
338 {
339 struct kvm_one_reg cp0reg = {
340 .id = reg_id,
341 .addr = (uintptr_t)addr
342 };
343
344 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
345 }
346
347 static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
348 uint32_t *addr)
349 {
350 struct kvm_one_reg cp0reg = {
351 .id = reg_id,
352 .addr = (uintptr_t)addr
353 };
354
355 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
356 }
357
358 static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
359 target_ulong *addr)
360 {
361 int ret;
362 uint64_t val64 = 0;
363 struct kvm_one_reg cp0reg = {
364 .id = reg_id,
365 .addr = (uintptr_t)&val64
366 };
367
368 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
369 if (ret >= 0) {
370 *addr = val64;
371 }
372 return ret;
373 }
374
375 static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
376 int64_t *addr)
377 {
378 struct kvm_one_reg cp0reg = {
379 .id = reg_id,
380 .addr = (uintptr_t)addr
381 };
382
383 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
384 }
385
386 static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
387 uint64_t *addr)
388 {
389 struct kvm_one_reg cp0reg = {
390 .id = reg_id,
391 .addr = (uintptr_t)addr
392 };
393
394 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
395 }
396
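/*
 * Bits of each Config register that QEMU may try to change through
 * kvm_mips_change_one_reg() below; all other bits are left exactly as KVM
 * reports them.
 */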
397 #define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M)
398 #define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \
399 (1U << CP0C1_FP))
400 #define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M)
401 #define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \
402 (1U << CP0C3_MSAP))
403 #define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M)
404 #define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \
405 (1U << CP0C5_UFE) | \
406 (1U << CP0C5_FRE) | \
407 (1U << CP0C5_UFR))
408 #define KVM_REG_MIPS_CP0_CONFIG6_MASK ((1U << CP0C6_BPPASS) | \
409 (0x3fU << CP0C6_KPOS) | \
410 (1U << CP0C6_KE) | \
411 (1U << CP0C6_VTLBONLY) | \
412 (1U << CP0C6_LASX) | \
413 (1U << CP0C6_SSEN) | \
414 (1U << CP0C6_DISDRTIME) | \
415 (1U << CP0C6_PIXNUEN) | \
416 (1U << CP0C6_SCRAND) | \
417 (1U << CP0C6_LLEXCEN) | \
418 (1U << CP0C6_DISVC) | \
419 (1U << CP0C6_VCLRU) | \
420 (1U << CP0C6_DCLRU) | \
421 (1U << CP0C6_PIXUEN) | \
422 (1U << CP0C6_DISBLKLYEN) | \
423 (1U << CP0C6_UMEMUALEN) | \
424 (1U << CP0C6_SFBEN) | \
425 (1U << CP0C6_FLTINT) | \
426 (1U << CP0C6_VLTINT) | \
427 (1U << CP0C6_DISBTB) | \
428 (3U << CP0C6_STPREFCTL) | \
429 (1U << CP0C6_INSTPREF) | \
430 (1U << CP0C6_DATAPREF))
431
432 static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
433 int32_t *addr, int32_t mask)
434 {
435 int err;
436 int32_t tmp, change;
437
438 err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
439 if (err < 0) {
440 return err;
441 }
442
443 /* only change bits in mask */
444 change = (*addr ^ tmp) & mask;
445 if (!change) {
446 return 0;
447 }
448
449 tmp = tmp ^ change;
450 return kvm_mips_put_one_reg(cs, reg_id, &tmp);
451 }
452
453 /*
454 * We freeze the KVM timer when either the VM clock is stopped or the state is
455 * saved (the state is dirty).
456 */
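/*
 * Per the KVM COUNT_CTL/COUNT_RESUME interface, setting the DC (Disable
 * Count) bit of KVM_REG_MIPS_COUNT_CTL freezes the guest CP0_Count, while
 * KVM_REG_MIPS_COUNT_RESUME tells KVM which nanosecond timestamp to treat as
 * the moment the count resumed once DC is cleared again.
 */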
457
458 /*
459 * Save the state of the KVM timer when VM clock is stopped or state is synced
460 * to QEMU.
461 */
462 static int kvm_mips_save_count(CPUState *cs)
463 {
464 MIPSCPU *cpu = MIPS_CPU(cs);
465 CPUMIPSState *env = &cpu->env;
466 uint64_t count_ctl;
467 int err, ret = 0;
468
469 /* freeze KVM timer */
470 err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
471 if (err < 0) {
472 DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
473 ret = err;
474 } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
475 count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
476 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
477 if (err < 0) {
478 DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
479 ret = err;
480 }
481 }
482
483 /* read CP0_Cause */
484 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
485 if (err < 0) {
486 DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
487 ret = err;
488 }
489
490 /* read CP0_Count */
491 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
492 if (err < 0) {
493 DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
494 ret = err;
495 }
496
497 return ret;
498 }
499
500 /*
501 * Restore the state of the KVM timer when VM clock is restarted or state is
502 * synced to KVM.
503 */
504 static int kvm_mips_restore_count(CPUState *cs)
505 {
506 MIPSCPU *cpu = MIPS_CPU(cs);
507 CPUMIPSState *env = &cpu->env;
508 uint64_t count_ctl;
509 int err_dc, err, ret = 0;
510
511 /* check the timer is frozen */
512 err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
513 if (err_dc < 0) {
514 DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
515 ret = err_dc;
516 } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
517 /* freeze timer (sets COUNT_RESUME for us) */
518 count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
519 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
520 if (err < 0) {
521 DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
522 ret = err;
523 }
524 }
525
526 /* load CP0_Cause */
527 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
528 if (err < 0) {
529 DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
530 ret = err;
531 }
532
533 /* load CP0_Count */
534 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
535 if (err < 0) {
536 DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
537 ret = err;
538 }
539
540 /* resume KVM timer */
541 if (err_dc >= 0) {
542 count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
543 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
544 if (err < 0) {
545 DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
546 ret = err;
547 }
548 }
549
550 return ret;
551 }
552
553 /*
554 * Handle the VM clock being started or stopped
555 */
556 static void kvm_mips_update_state(void *opaque, int running, RunState state)
557 {
558 CPUState *cs = opaque;
559 int ret;
560 uint64_t count_resume;
561
562 /*
563 * If state is already dirty (synced to QEMU) then the KVM timer state is
564 * already saved and can be restored when it is synced back to KVM.
565 */
566 if (!running) {
567 if (!cs->vcpu_dirty) {
568 ret = kvm_mips_save_count(cs);
569 if (ret < 0) {
570 warn_report("Failed saving count");
571 }
572 }
573 } else {
574 /* Set clock restore time to now */
575 count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
576 ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
577 &count_resume);
578 if (ret < 0) {
579 warn_report("Failed setting COUNT_RESUME");
580 return;
581 }
582
583 if (!cs->vcpu_dirty) {
584 ret = kvm_mips_restore_count(cs);
585 if (ret < 0) {
586 warn_report("Failed restoring count");
587 }
588 }
589 }
590 }
591
592 static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
593 {
594 MIPSCPU *cpu = MIPS_CPU(cs);
595 CPUMIPSState *env = &cpu->env;
596 int err, ret = 0;
597 unsigned int i;
598
599 /* Only put FPU state if we're emulating a CPU with an FPU */
600 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
601 /* FPU Control Registers */
602 if (level == KVM_PUT_FULL_STATE) {
603 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
604 &env->active_fpu.fcr0);
605 if (err < 0) {
606 DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
607 ret = err;
608 }
609 }
610 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
611 &env->active_fpu.fcr31);
612 if (err < 0) {
613 DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
614 ret = err;
615 }
616
617 /*
618 * FPU register state is a subset of MSA vector state, so don't put FPU
619 * registers if we're emulating a CPU with MSA.
620 */
621 if (!ase_msa_available(env)) {
622 /* Floating point registers */
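/*
 * Status.FR selects the FPU register width: FR=1 exposes 32 64-bit
 * registers, FR=0 the 32-bit view, so transfer whichever width is active.
 */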
623 for (i = 0; i < 32; ++i) {
624 if (env->CP0_Status & (1 << CP0St_FR)) {
625 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
626 &env->active_fpu.fpr[i].d);
627 } else {
628 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
629 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
630 }
631 if (err < 0) {
632 DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
633 ret = err;
634 }
635 }
636 }
637 }
638
639 /* Only put MSA state if we're emulating a CPU with MSA */
640 if (ase_msa_available(env)) {
641 /* MSA Control Registers */
642 if (level == KVM_PUT_FULL_STATE) {
643 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
644 &env->msair);
645 if (err < 0) {
646 DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
647 ret = err;
648 }
649 }
650 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
651 &env->active_tc.msacsr);
652 if (err < 0) {
653 DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
654 ret = err;
655 }
656
657 /* Vector registers (includes FP registers) */
658 for (i = 0; i < 32; ++i) {
659 /* Big endian MSA not supported by QEMU yet anyway */
660 err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
661 env->active_fpu.fpr[i].wr.d);
662 if (err < 0) {
663 DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
664 ret = err;
665 }
666 }
667 }
668
669 return ret;
670 }
671
672 static int kvm_mips_get_fpu_registers(CPUState *cs)
673 {
674 MIPSCPU *cpu = MIPS_CPU(cs);
675 CPUMIPSState *env = &cpu->env;
676 int err, ret = 0;
677 unsigned int i;
678
679 /* Only get FPU state if we're emulating a CPU with an FPU */
680 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
681 /* FPU Control Registers */
682 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
683 &env->active_fpu.fcr0);
684 if (err < 0) {
685 DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
686 ret = err;
687 }
688 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
689 &env->active_fpu.fcr31);
690 if (err < 0) {
691 DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
692 ret = err;
693 } else {
694 restore_fp_status(env);
695 }
696
697 /*
698 * FPU register state is a subset of MSA vector state, so don't get FPU
699 * registers if we're emulating a CPU with MSA.
700 */
701 if (!ase_msa_available(env)) {
702 /* Floating point registers */
703 for (i = 0; i < 32; ++i) {
704 if (env->CP0_Status & (1 << CP0St_FR)) {
705 err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
706 &env->active_fpu.fpr[i].d);
707 } else {
708 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
709 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
710 }
711 if (err < 0) {
712 DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
713 ret = err;
714 }
715 }
716 }
717 }
718
719 /* Only get MSA state if we're emulating a CPU with MSA */
720 if (ase_msa_available(env)) {
721 /* MSA Control Registers */
722 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
723 &env->msair);
724 if (err < 0) {
725 DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
726 ret = err;
727 }
728 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
729 &env->active_tc.msacsr);
730 if (err < 0) {
731 DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
732 ret = err;
733 } else {
734 restore_msa_fp_status(env);
735 }
736
737 /* Vector registers (includes FP registers) */
738 for (i = 0; i < 32; ++i) {
739 /* Big endian MSA not supported by QEMU yet anyway */
740 err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
741 env->active_fpu.fpr[i].wr.d);
742 if (err < 0) {
743 DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
744 ret = err;
745 }
746 }
747 }
748
749 return ret;
750 }
751
752
753 static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
754 {
755 MIPSCPU *cpu = MIPS_CPU(cs);
756 CPUMIPSState *env = &cpu->env;
757 int err, ret = 0;
758
759 (void)level;
760
761 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
762 if (err < 0) {
763 DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
764 ret = err;
765 }
766 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
767 if (err < 0) {
768 DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
769 ret = err;
770 }
771 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
772 &env->CP0_Context);
773 if (err < 0) {
774 DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
775 ret = err;
776 }
777 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
778 &env->active_tc.CP0_UserLocal);
779 if (err < 0) {
780 DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
781 ret = err;
782 }
783 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
784 &env->CP0_PageMask);
785 if (err < 0) {
786 DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
787 ret = err;
788 }
789 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
790 &env->CP0_PageGrain);
791 if (err < 0) {
792 DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
793 ret = err;
794 }
795 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
796 &env->CP0_PWBase);
797 if (err < 0) {
798 DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
799 ret = err;
800 }
801 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
802 &env->CP0_PWField);
803 if (err < 0) {
804 DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
805 ret = err;
806 }
807 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
808 &env->CP0_PWSize);
809 if (err < 0) {
810 DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
811 ret = err;
812 }
813 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
814 if (err < 0) {
815 DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
816 ret = err;
817 }
818 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
819 if (err < 0) {
820 DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
821 ret = err;
822 }
823 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
824 if (err < 0) {
825 DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
826 ret = err;
827 }
828 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
829 &env->CP0_BadVAddr);
830 if (err < 0) {
831 DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
832 ret = err;
833 }
834
835 /* If VM clock stopped then state will be restored when it is restarted */
836 if (runstate_is_running()) {
837 err = kvm_mips_restore_count(cs);
838 if (err < 0) {
839 ret = err;
840 }
841 }
842
843 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
844 &env->CP0_EntryHi);
845 if (err < 0) {
846 DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
847 ret = err;
848 }
849 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
850 &env->CP0_Compare);
851 if (err < 0) {
852 DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
853 ret = err;
854 }
855 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
856 if (err < 0) {
857 DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
858 ret = err;
859 }
860 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
861 if (err < 0) {
862 DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
863 ret = err;
864 }
865 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
866 if (err < 0) {
867 DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
868 ret = err;
869 }
870 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
871 if (err < 0) {
872 DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
873 ret = err;
874 }
875 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
876 &env->CP0_Config0,
877 KVM_REG_MIPS_CP0_CONFIG_MASK);
878 if (err < 0) {
879 DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
880 ret = err;
881 }
882 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
883 &env->CP0_Config1,
884 KVM_REG_MIPS_CP0_CONFIG1_MASK);
885 if (err < 0) {
886 DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
887 ret = err;
888 }
889 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
890 &env->CP0_Config2,
891 KVM_REG_MIPS_CP0_CONFIG2_MASK);
892 if (err < 0) {
893 DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
894 ret = err;
895 }
896 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
897 &env->CP0_Config3,
898 KVM_REG_MIPS_CP0_CONFIG3_MASK);
899 if (err < 0) {
900 DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
901 ret = err;
902 }
903 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
904 &env->CP0_Config4,
905 KVM_REG_MIPS_CP0_CONFIG4_MASK);
906 if (err < 0) {
907 DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
908 ret = err;
909 }
910 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
911 &env->CP0_Config5,
912 KVM_REG_MIPS_CP0_CONFIG5_MASK);
913 if (err < 0) {
914 DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
915 ret = err;
916 }
917 err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
918 &env->CP0_Config6,
919 KVM_REG_MIPS_CP0_CONFIG6_MASK);
920 if (err < 0) {
921 DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
922 ret = err;
923 }
924 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
925 &env->CP0_XContext);
926 if (err < 0) {
927 DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
928 ret = err;
929 }
930 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
931 &env->CP0_ErrorEPC);
932 if (err < 0) {
933 DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
934 ret = err;
935 }
936 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
937 &env->CP0_KScratch[0]);
938 if (err < 0) {
939 DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
940 ret = err;
941 }
942 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
943 &env->CP0_KScratch[1]);
944 if (err < 0) {
945 DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
946 ret = err;
947 }
948 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
949 &env->CP0_KScratch[2]);
950 if (err < 0) {
951 DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
952 ret = err;
953 }
954 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
955 &env->CP0_KScratch[3]);
956 if (err < 0) {
957 DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
958 ret = err;
959 }
960 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
961 &env->CP0_KScratch[4]);
962 if (err < 0) {
963 DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
964 ret = err;
965 }
966 err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
967 &env->CP0_KScratch[5]);
968 if (err < 0) {
969 DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
970 ret = err;
971 }
972
973 return ret;
974 }
975
976 static int kvm_mips_get_cp0_registers(CPUState *cs)
977 {
978 MIPSCPU *cpu = MIPS_CPU(cs);
979 CPUMIPSState *env = &cpu->env;
980 int err, ret = 0;
981
982 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
983 if (err < 0) {
984 DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
985 ret = err;
986 }
987 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
988 if (err < 0) {
989 DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
990 ret = err;
991 }
992 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
993 &env->CP0_Context);
994 if (err < 0) {
995 DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
996 ret = err;
997 }
998 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
999 &env->active_tc.CP0_UserLocal);
1000 if (err < 0) {
1001 DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
1002 ret = err;
1003 }
1004 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
1005 &env->CP0_PageMask);
1006 if (err < 0) {
1007 DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
1008 ret = err;
1009 }
1010 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
1011 &env->CP0_PageGrain);
1012 if (err < 0) {
1013 DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
1014 ret = err;
1015 }
1016 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
1017 &env->CP0_PWBase);
1018 if (err < 0) {
1019 DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
1020 ret = err;
1021 }
1022 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
1023 &env->CP0_PWField);
1024 if (err < 0) {
1025 DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
1026 ret = err;
1027 }
1028 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
1029 &env->CP0_PWSize);
1030 if (err < 0) {
1031 DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
1032 ret = err;
1033 }
1034 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
1035 if (err < 0) {
1036 DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
1037 ret = err;
1038 }
1039 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
1040 if (err < 0) {
1041 DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
1042 ret = err;
1043 }
1044 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
1045 if (err < 0) {
1046 DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
1047 ret = err;
1048 }
1049 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
1050 &env->CP0_BadVAddr);
1051 if (err < 0) {
1052 DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
1053 ret = err;
1054 }
1055 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
1056 &env->CP0_EntryHi);
1057 if (err < 0) {
1058 DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
1059 ret = err;
1060 }
1061 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
1062 &env->CP0_Compare);
1063 if (err < 0) {
1064 DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
1065 ret = err;
1066 }
1067 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
1068 if (err < 0) {
1069 DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
1070 ret = err;
1071 }
1072
1073 /* If VM clock stopped then state was already saved when it was stopped */
1074 if (runstate_is_running()) {
1075 err = kvm_mips_save_count(cs);
1076 if (err < 0) {
1077 ret = err;
1078 }
1079 }
1080
1081 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
1082 if (err < 0) {
1083 DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
1084 ret = err;
1085 }
1086 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
1087 if (err < 0) {
1088 DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
1089 ret = err;
1090 }
1091 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
1092 if (err < 0) {
1093 DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
1094 ret = err;
1095 }
1096 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
1097 if (err < 0) {
1098 DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
1099 ret = err;
1100 }
1101 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
1102 if (err < 0) {
1103 DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
1104 ret = err;
1105 }
1106 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
1107 if (err < 0) {
1108 DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
1109 ret = err;
1110 }
1111 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
1112 if (err < 0) {
1113 DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
1114 ret = err;
1115 }
1116 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
1117 if (err < 0) {
1118 DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
1119 ret = err;
1120 }
1121 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
1122 if (err < 0) {
1123 DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
1124 ret = err;
1125 }
1126 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
1127 if (err < 0) {
1128 DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
1129 ret = err;
1130 }
1131 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
1132 &env->CP0_XContext);
1133 if (err < 0) {
1134 DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
1135 ret = err;
1136 }
1137 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
1138 &env->CP0_ErrorEPC);
1139 if (err < 0) {
1140 DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
1141 ret = err;
1142 }
1143 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
1144 &env->CP0_KScratch[0]);
1145 if (err < 0) {
1146 DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
1147 ret = err;
1148 }
1149 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
1150 &env->CP0_KScratch[1]);
1151 if (err < 0) {
1152 DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
1153 ret = err;
1154 }
1155 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
1156 &env->CP0_KScratch[2]);
1157 if (err < 0) {
1158 DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
1159 ret = err;
1160 }
1161 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
1162 &env->CP0_KScratch[3]);
1163 if (err < 0) {
1164 DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
1165 ret = err;
1166 }
1167 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
1168 &env->CP0_KScratch[4]);
1169 if (err < 0) {
1170 DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
1171 ret = err;
1172 }
1173 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
1174 &env->CP0_KScratch[5]);
1175 if (err < 0) {
1176 DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
1177 ret = err;
1178 }
1179
1180 return ret;
1181 }
1182
1183 int kvm_arch_put_registers(CPUState *cs, int level)
1184 {
1185 MIPSCPU *cpu = MIPS_CPU(cs);
1186 CPUMIPSState *env = &cpu->env;
1187 struct kvm_regs regs;
1188 int ret;
1189 int i;
1190
1191 /* Set the registers based on QEMU's view of things */
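/*
 * struct kvm_regs stores each value in a 64-bit field; the
 * (int64_t)(target_long) casts sign-extend 32-bit guest registers to fill it.
 */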
1192 for (i = 0; i < 32; i++) {
1193 regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
1194 }
1195
1196 regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
1197 regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
1198 regs.pc = (int64_t)(target_long)env->active_tc.PC;
1199
1200 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1201
1202 if (ret < 0) {
1203 return ret;
1204 }
1205
1206 ret = kvm_mips_put_cp0_registers(cs, level);
1207 if (ret < 0) {
1208 return ret;
1209 }
1210
1211 ret = kvm_mips_put_fpu_registers(cs, level);
1212 if (ret < 0) {
1213 return ret;
1214 }
1215
1216 return ret;
1217 }
1218
1219 int kvm_arch_get_registers(CPUState *cs)
1220 {
1221 MIPSCPU *cpu = MIPS_CPU(cs);
1222 CPUMIPSState *env = &cpu->env;
1223 int ret = 0;
1224 struct kvm_regs regs;
1225 int i;
1226
1227 /* Get the current register set as KVM sees it */
1228 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1229
1230 if (ret < 0) {
1231 return ret;
1232 }
1233
1234 for (i = 0; i < 32; i++) {
1235 env->active_tc.gpr[i] = regs.gpr[i];
1236 }
1237
1238 env->active_tc.HI[0] = regs.hi;
1239 env->active_tc.LO[0] = regs.lo;
1240 env->active_tc.PC = regs.pc;
1241
1242 kvm_mips_get_cp0_registers(cs);
1243 kvm_mips_get_fpu_registers(cs);
1244
1245 return ret;
1246 }
1247
1248 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1249 uint64_t address, uint32_t data, PCIDevice *dev)
1250 {
1251 return 0;
1252 }
1253
1254 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
1255 int vector, PCIDevice *dev)
1256 {
1257 return 0;
1258 }
1259
1260 int kvm_arch_release_virq_post(int virq)
1261 {
1262 return 0;
1263 }
1264
1265 int kvm_arch_msi_data_to_gsi(uint32_t data)
1266 {
1267 abort();
1268 }
1269
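/*
 * Select the KVM VM type: prefer hardware-assisted virtualization (VZ) when
 * the host kernel advertises KVM_CAP_MIPS_VZ, fall back to trap-and-emulate
 * (KVM_CAP_MIPS_TE), and return -1 if neither is available.
 */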
1270 int mips_kvm_type(MachineState *machine, const char *vm_type)
1271 {
1272 #if defined(KVM_CAP_MIPS_VZ) || defined(KVM_CAP_MIPS_TE)
1273 int r;
1274 KVMState *s = KVM_STATE(machine->accelerator);
1275 #endif
1276
1277 #if defined(KVM_CAP_MIPS_VZ)
1278 r = kvm_check_extension(s, KVM_CAP_MIPS_VZ);
1279 if (r > 0) {
1280 return KVM_VM_MIPS_VZ;
1281 }
1282 #endif
1283
1284 #if defined(KVM_CAP_MIPS_TE)
1285 r = kvm_check_extension(s, KVM_CAP_MIPS_TE);
1286 if (r > 0) {
1287 return KVM_VM_MIPS_TE;
1288 }
1289 #endif
1290
1291 return -1;
1292 }