]> git.proxmox.com Git - mirror_qemu.git/blob - target/mips/kvm.c
Merge remote-tracking branch 'remotes/amarkovic/tags/mips-queue-june-01-2020' into...
[mirror_qemu.git] / target / mips / kvm.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include "qemu/osdep.h"
13 #include <sys/ioctl.h>
14
15 #include <linux/kvm.h>
16
17 #include "qemu-common.h"
18 #include "cpu.h"
19 #include "internal.h"
20 #include "qemu/error-report.h"
21 #include "qemu/main-loop.h"
22 #include "qemu/timer.h"
23 #include "sysemu/kvm.h"
24 #include "sysemu/runstate.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_mips.h"
27 #include "exec/memattrs.h"
28
/* Set to 1 to get verbose KVM debug output on stderr. */
#define DEBUG_KVM 0

/* Debug printf; compiles to a no-op statement when DEBUG_KVM is 0. */
#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
33
/*
 * Host KVM support for guest FPU/MSA state; probed once in kvm_arch_init()
 * and cleared again if per-vCPU enabling fails in kvm_arch_init_vcpu().
 */
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

/* No KVM capabilities beyond the generic set are mandatory on MIPS. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);
42
/* Map a QEMU vCPU to its KVM vcpu id; on MIPS this is the plain cpu index. */
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}
47
/*
 * One-time, VM-wide KVM setup: configure the signal mask width and probe
 * the optional FPU/MSA virtualization capabilities of the host.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16); /* 128 bits == 16 bytes */

    /* Cache host support; consumed by kvm_arch_init_vcpu()/reset_vcpu() */
    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}
59
/* MIPS has no in-kernel irqchip; returning 0 means "none created". */
int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}
64
65 int kvm_arch_init_vcpu(CPUState *cs)
66 {
67 MIPSCPU *cpu = MIPS_CPU(cs);
68 CPUMIPSState *env = &cpu->env;
69 int ret = 0;
70
71 qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
72
73 if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
74 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
75 if (ret < 0) {
76 /* mark unsupported so it gets disabled on reset */
77 kvm_mips_fpu_cap = 0;
78 ret = 0;
79 }
80 }
81
82 if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
83 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
84 if (ret < 0) {
85 /* mark unsupported so it gets disabled on reset */
86 kvm_mips_msa_cap = 0;
87 ret = 0;
88 }
89 }
90
91 DPRINTF("%s\n", __func__);
92 return ret;
93 }
94
/* Nothing to tear down per-vCPU on MIPS. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
99
100 void kvm_mips_reset_vcpu(MIPSCPU *cpu)
101 {
102 CPUMIPSState *env = &cpu->env;
103
104 if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
105 warn_report("KVM does not support FPU, disabling");
106 env->CP0_Config1 &= ~(1 << CP0C1_FP);
107 }
108 if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
109 warn_report("KVM does not support MSA, disabling");
110 env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
111 }
112
113 DPRINTF("%s\n", __func__);
114 }
115
/* Software breakpoints are not implemented for MIPS KVM; report success. */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
121
/* Counterpart of kvm_arch_insert_sw_breakpoint(); also a no-op. */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
127
/*
 * Return non-zero when hardware interrupt 0 (Cause.IP2) is pending,
 * i.e. when an I/O interrupt should be forwarded to KVM.
 */
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}
134
135
136 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
137 {
138 MIPSCPU *cpu = MIPS_CPU(cs);
139 int r;
140 struct kvm_mips_interrupt intr;
141
142 qemu_mutex_lock_iothread();
143
144 if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
145 cpu_mips_io_interrupts_pending(cpu)) {
146 intr.cpu = -1;
147 intr.irq = 2;
148 r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
149 if (r < 0) {
150 error_report("%s: cpu %d: failed to inject IRQ %x",
151 __func__, cs->cpu_index, intr.irq);
152 }
153 }
154
155 qemu_mutex_unlock_iothread();
156 }
157
/* Nothing to do after guest exit; no special memory attributes on MIPS. */
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}
162
/* A halted vCPU (non-zero return) is kept out of KVM_RUN by common code. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
167
168 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
169 {
170 int ret;
171
172 DPRINTF("%s\n", __func__);
173 switch (run->exit_reason) {
174 default:
175 error_report("%s: unknown exit reason %d",
176 __func__, run->exit_reason);
177 ret = -1;
178 break;
179 }
180
181 return ret;
182 }
183
/* Always stop the VM on an emulation error rather than retrying. */
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}
189
/* No KVM irq routing on MIPS; intentionally empty. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
193
194 int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
195 {
196 CPUState *cs = CPU(cpu);
197 struct kvm_mips_interrupt intr;
198
199 if (!kvm_enabled()) {
200 return 0;
201 }
202
203 intr.cpu = -1;
204
205 if (level) {
206 intr.irq = irq;
207 } else {
208 intr.irq = -irq;
209 }
210
211 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
212
213 return 0;
214 }
215
216 int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
217 {
218 CPUState *cs = current_cpu;
219 CPUState *dest_cs = CPU(cpu);
220 struct kvm_mips_interrupt intr;
221
222 if (!kvm_enabled()) {
223 return 0;
224 }
225
226 intr.cpu = dest_cs->cpu_index;
227
228 if (level) {
229 intr.irq = irq;
230 } else {
231 intr.irq = -irq;
232 }
233
234 DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
235
236 kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
237
238 return 0;
239 }
240
/*
 * Build KVM_GET/SET_ONE_REG identifiers for CP0 registers: the low bits
 * encode 8 * register-number + selector, combined with the architecture
 * and access-size flags expected by the KVM one-reg ABI.
 */
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_RANDOM         MIPS_CP0_32(1, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN      MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_PWBASE         MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD        MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE         MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL          MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE          MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6        MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_XCONTEXT       MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1      MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2     MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3      MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4      MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5      MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6      MIPS_CP0_64(31, 7)
283
284 static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
285 int32_t *addr)
286 {
287 struct kvm_one_reg cp0reg = {
288 .id = reg_id,
289 .addr = (uintptr_t)addr
290 };
291
292 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
293 }
294
295 static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
296 uint32_t *addr)
297 {
298 struct kvm_one_reg cp0reg = {
299 .id = reg_id,
300 .addr = (uintptr_t)addr
301 };
302
303 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
304 }
305
306 static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
307 target_ulong *addr)
308 {
309 uint64_t val64 = *addr;
310 struct kvm_one_reg cp0reg = {
311 .id = reg_id,
312 .addr = (uintptr_t)&val64
313 };
314
315 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
316 }
317
318 static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
319 int64_t *addr)
320 {
321 struct kvm_one_reg cp0reg = {
322 .id = reg_id,
323 .addr = (uintptr_t)addr
324 };
325
326 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
327 }
328
329 static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
330 uint64_t *addr)
331 {
332 struct kvm_one_reg cp0reg = {
333 .id = reg_id,
334 .addr = (uintptr_t)addr
335 };
336
337 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
338 }
339
340 static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
341 int32_t *addr)
342 {
343 struct kvm_one_reg cp0reg = {
344 .id = reg_id,
345 .addr = (uintptr_t)addr
346 };
347
348 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
349 }
350
351 static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
352 uint32_t *addr)
353 {
354 struct kvm_one_reg cp0reg = {
355 .id = reg_id,
356 .addr = (uintptr_t)addr
357 };
358
359 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
360 }
361
362 static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
363 target_ulong *addr)
364 {
365 int ret;
366 uint64_t val64 = 0;
367 struct kvm_one_reg cp0reg = {
368 .id = reg_id,
369 .addr = (uintptr_t)&val64
370 };
371
372 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
373 if (ret >= 0) {
374 *addr = val64;
375 }
376 return ret;
377 }
378
379 static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
380 int64_t *addr)
381 {
382 struct kvm_one_reg cp0reg = {
383 .id = reg_id,
384 .addr = (uintptr_t)addr
385 };
386
387 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
388 }
389
390 static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
391 uint64_t *addr)
392 {
393 struct kvm_one_reg cp0reg = {
394 .id = reg_id,
395 .addr = (uintptr_t)addr
396 };
397
398 return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
399 }
400
/*
 * Per-register masks of the Config bits QEMU is allowed to change through
 * kvm_mips_change_one_reg(); all other bits are owned by KVM/hardware.
 * The CONFIG6 mask covers vendor-specific (Loongson) bits -- NOTE(review):
 * presumably matching the June 2020 Loongson KVM support; verify against
 * the kernel's definition of writable Config6 bits.
 */
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))
#define KVM_REG_MIPS_CP0_CONFIG6_MASK   ((1U << CP0C6_BPPASS) | \
                                         (0x3fU << CP0C6_KPOS) | \
                                         (1U << CP0C6_KE) | \
                                         (1U << CP0C6_VTLBONLY) | \
                                         (1U << CP0C6_LASX) | \
                                         (1U << CP0C6_SSEN) | \
                                         (1U << CP0C6_DISDRTIME) | \
                                         (1U << CP0C6_PIXNUEN) | \
                                         (1U << CP0C6_SCRAND) | \
                                         (1U << CP0C6_LLEXCEN) | \
                                         (1U << CP0C6_DISVC) | \
                                         (1U << CP0C6_VCLRU) | \
                                         (1U << CP0C6_DCLRU) | \
                                         (1U << CP0C6_PIXUEN) | \
                                         (1U << CP0C6_DISBLKLYEN) | \
                                         (1U << CP0C6_UMEMUALEN) | \
                                         (1U << CP0C6_SFBEN) | \
                                         (1U << CP0C6_FLTINT) | \
                                         (1U << CP0C6_VLTINT) | \
                                         (1U << CP0C6_DISBTB) | \
                                         (3U << CP0C6_STPREFCTL) | \
                                         (1U << CP0C6_INSTPREF) | \
                                         (1U << CP0C6_DATAPREF))
435
436 static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
437 int32_t *addr, int32_t mask)
438 {
439 int err;
440 int32_t tmp, change;
441
442 err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
443 if (err < 0) {
444 return err;
445 }
446
447 /* only change bits in mask */
448 change = (*addr ^ tmp) & mask;
449 if (!change) {
450 return 0;
451 }
452
453 tmp = tmp ^ change;
454 return kvm_mips_put_one_reg(cs, reg_id, &tmp);
455 }
456
457 /*
458 * We freeze the KVM timer when either the VM clock is stopped or the state is
459 * saved (the state is dirty).
460 */
461
462 /*
463 * Save the state of the KVM timer when VM clock is stopped or state is synced
464 * to QEMU.
465 */
/*
 * Snapshot the KVM timer state (CP0_Cause and CP0_Count) into env.
 *
 * The timer is frozen first (COUNT_CTL.DC set) so the two reads are
 * mutually consistent; it is deliberately left frozen afterwards and only
 * resumed by kvm_mips_restore_count().  Errors are reported but do not
 * abort the remaining reads; the last error is returned.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* only set DC when it is not already set */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
503
504 /*
505 * Restore the state of the KVM timer when VM clock is restarted or state is
506 * synced to KVM.
507 */
/*
 * Push the saved timer state (CP0_Cause and CP0_Count) from env back into
 * KVM and resume the timer.
 *
 * The timer is frozen (COUNT_CTL.DC) around the two writes so they are
 * applied atomically with respect to the running clock, then DC is
 * cleared again -- but only if the initial COUNT_CTL read succeeded,
 * since otherwise count_ctl would be garbage.  The last error (if any)
 * is returned.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}
556
557 /*
558 * Handle the VM clock being started or stopped
559 */
560 static void kvm_mips_update_state(void *opaque, int running, RunState state)
561 {
562 CPUState *cs = opaque;
563 int ret;
564 uint64_t count_resume;
565
566 /*
567 * If state is already dirty (synced to QEMU) then the KVM timer state is
568 * already saved and can be restored when it is synced back to KVM.
569 */
570 if (!running) {
571 if (!cs->vcpu_dirty) {
572 ret = kvm_mips_save_count(cs);
573 if (ret < 0) {
574 warn_report("Failed saving count");
575 }
576 }
577 } else {
578 /* Set clock restore time to now */
579 count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
580 ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
581 &count_resume);
582 if (ret < 0) {
583 warn_report("Failed setting COUNT_RESUME");
584 return;
585 }
586
587 if (!cs->vcpu_dirty) {
588 ret = kvm_mips_restore_count(cs);
589 if (ret < 0) {
590 warn_report("Failed restoring count");
591 }
592 }
593 }
594 }
595
596 static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
597 {
598 MIPSCPU *cpu = MIPS_CPU(cs);
599 CPUMIPSState *env = &cpu->env;
600 int err, ret = 0;
601 unsigned int i;
602
603 /* Only put FPU state if we're emulating a CPU with an FPU */
604 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
605 /* FPU Control Registers */
606 if (level == KVM_PUT_FULL_STATE) {
607 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
608 &env->active_fpu.fcr0);
609 if (err < 0) {
610 DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
611 ret = err;
612 }
613 }
614 err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
615 &env->active_fpu.fcr31);
616 if (err < 0) {
617 DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
618 ret = err;
619 }
620
621 /*
622 * FPU register state is a subset of MSA vector state, so don't put FPU
623 * registers if we're emulating a CPU with MSA.
624 */
625 if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
626 /* Floating point registers */
627 for (i = 0; i < 32; ++i) {
628 if (env->CP0_Status & (1 << CP0St_FR)) {
629 err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
630 &env->active_fpu.fpr[i].d);
631 } else {
632 err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
633 &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
634 }
635 if (err < 0) {
636 DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
637 ret = err;
638 }
639 }
640 }
641 }
642
643 /* Only put MSA state if we're emulating a CPU with MSA */
644 if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
645 /* MSA Control Registers */
646 if (level == KVM_PUT_FULL_STATE) {
647 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
648 &env->msair);
649 if (err < 0) {
650 DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
651 ret = err;
652 }
653 }
654 err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
655 &env->active_tc.msacsr);
656 if (err < 0) {
657 DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
658 ret = err;
659 }
660
661 /* Vector registers (includes FP registers) */
662 for (i = 0; i < 32; ++i) {
663 /* Big endian MSA not supported by QEMU yet anyway */
664 err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
665 env->active_fpu.fpr[i].wr.d);
666 if (err < 0) {
667 DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
668 ret = err;
669 }
670 }
671 }
672
673 return ret;
674 }
675
/*
 * Read the guest FPU/MSA state from KVM into env.
 *
 * Mirror image of kvm_mips_put_fpu_registers(): FPU data registers are
 * skipped when the CPU has MSA (they alias the MSA vector registers,
 * which are read instead), and the QEMU softfloat status is refreshed
 * after a successful FCSR/MSACSR read.  Errors are reported but do not
 * stop the remaining transfers; the last error is returned.
 */
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* resync softfloat rounding/flags with the new FCSR */
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    /* Status.FR=1: 64-bit FP register file */
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    /* Status.FR=0: 32-bit FP register file */
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* resync softfloat rounding/flags with the new MSACSR */
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
755
756
/*
 * Write the guest CP0 register state from env into KVM.
 *
 * Plain registers are put directly; the Config registers go through
 * kvm_mips_change_one_reg() so only the bits QEMU may legitimately alter
 * (per the KVM_REG_MIPS_CP0_CONFIG*_MASK masks) are touched.  CP0_Cause
 * and CP0_Count are written via kvm_mips_restore_count() in the middle of
 * the sequence, and only while the VM clock is running -- if the clock is
 * stopped, the timer state will be restored when it restarts.  Errors are
 * reported but do not abort remaining writes; the last error is returned.
 */
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    /* level is currently unused: every register is written unconditionally */
    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
                               &env->CP0_PageGrain);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
                                 &env->CP0_PWBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
                                 &env->CP0_PWField);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
                                 &env->CP0_PWSize);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
        ret = err;
    }
    /* Config registers: only masked (QEMU-writable) bits are changed */
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
                                  &env->CP0_Config6,
                                  KVM_REG_MIPS_CP0_CONFIG6_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
                                 &env->CP0_XContext);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
                                 &env->CP0_KScratch[0]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
                                 &env->CP0_KScratch[1]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
                                 &env->CP0_KScratch[2]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
                                 &env->CP0_KScratch[3]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
                                 &env->CP0_KScratch[4]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
                                 &env->CP0_KScratch[5]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
979
980 static int kvm_mips_get_cp0_registers(CPUState *cs)
981 {
982 MIPSCPU *cpu = MIPS_CPU(cs);
983 CPUMIPSState *env = &cpu->env;
984 int err, ret = 0;
985
986 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
987 if (err < 0) {
988 DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
989 ret = err;
990 }
991 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
992 if (err < 0) {
993 DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
994 ret = err;
995 }
996 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
997 &env->CP0_Context);
998 if (err < 0) {
999 DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
1000 ret = err;
1001 }
1002 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
1003 &env->active_tc.CP0_UserLocal);
1004 if (err < 0) {
1005 DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
1006 ret = err;
1007 }
1008 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
1009 &env->CP0_PageMask);
1010 if (err < 0) {
1011 DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
1012 ret = err;
1013 }
1014 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
1015 &env->CP0_PageGrain);
1016 if (err < 0) {
1017 DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
1018 ret = err;
1019 }
1020 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
1021 &env->CP0_PWBase);
1022 if (err < 0) {
1023 DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
1024 ret = err;
1025 }
1026 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
1027 &env->CP0_PWField);
1028 if (err < 0) {
1029 DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
1030 ret = err;
1031 }
1032 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
1033 &env->CP0_PWSize);
1034 if (err < 0) {
1035 DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
1036 ret = err;
1037 }
1038 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
1039 if (err < 0) {
1040 DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
1041 ret = err;
1042 }
1043 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
1044 if (err < 0) {
1045 DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
1046 ret = err;
1047 }
1048 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
1049 if (err < 0) {
1050 DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
1051 ret = err;
1052 }
1053 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
1054 &env->CP0_BadVAddr);
1055 if (err < 0) {
1056 DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
1057 ret = err;
1058 }
1059 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
1060 &env->CP0_EntryHi);
1061 if (err < 0) {
1062 DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
1063 ret = err;
1064 }
1065 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
1066 &env->CP0_Compare);
1067 if (err < 0) {
1068 DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
1069 ret = err;
1070 }
1071 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
1072 if (err < 0) {
1073 DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
1074 ret = err;
1075 }
1076
1077 /* If VM clock stopped then state was already saved when it was stopped */
1078 if (runstate_is_running()) {
1079 err = kvm_mips_save_count(cs);
1080 if (err < 0) {
1081 ret = err;
1082 }
1083 }
1084
1085 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
1086 if (err < 0) {
1087 DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
1088 ret = err;
1089 }
1090 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
1091 if (err < 0) {
1092 DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
1093 ret = err;
1094 }
1095 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
1096 if (err < 0) {
1097 DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
1098 ret = err;
1099 }
1100 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
1101 if (err < 0) {
1102 DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
1103 ret = err;
1104 }
1105 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
1106 if (err < 0) {
1107 DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
1108 ret = err;
1109 }
1110 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
1111 if (err < 0) {
1112 DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
1113 ret = err;
1114 }
1115 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
1116 if (err < 0) {
1117 DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
1118 ret = err;
1119 }
1120 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
1121 if (err < 0) {
1122 DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
1123 ret = err;
1124 }
1125 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
1126 if (err < 0) {
1127 DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
1128 ret = err;
1129 }
1130 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
1131 if (err < 0) {
1132 DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
1133 ret = err;
1134 }
1135 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
1136 &env->CP0_XContext);
1137 if (err < 0) {
1138 DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
1139 ret = err;
1140 }
1141 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
1142 &env->CP0_ErrorEPC);
1143 if (err < 0) {
1144 DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
1145 ret = err;
1146 }
1147 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
1148 &env->CP0_KScratch[0]);
1149 if (err < 0) {
1150 DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
1151 ret = err;
1152 }
1153 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
1154 &env->CP0_KScratch[1]);
1155 if (err < 0) {
1156 DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
1157 ret = err;
1158 }
1159 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
1160 &env->CP0_KScratch[2]);
1161 if (err < 0) {
1162 DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
1163 ret = err;
1164 }
1165 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
1166 &env->CP0_KScratch[3]);
1167 if (err < 0) {
1168 DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
1169 ret = err;
1170 }
1171 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
1172 &env->CP0_KScratch[4]);
1173 if (err < 0) {
1174 DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
1175 ret = err;
1176 }
1177 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
1178 &env->CP0_KScratch[5]);
1179 if (err < 0) {
1180 DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
1181 ret = err;
1182 }
1183
1184 return ret;
1185 }
1186
1187 int kvm_arch_put_registers(CPUState *cs, int level)
1188 {
1189 MIPSCPU *cpu = MIPS_CPU(cs);
1190 CPUMIPSState *env = &cpu->env;
1191 struct kvm_regs regs;
1192 int ret;
1193 int i;
1194
1195 /* Set the registers based on QEMU's view of things */
1196 for (i = 0; i < 32; i++) {
1197 regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
1198 }
1199
1200 regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
1201 regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
1202 regs.pc = (int64_t)(target_long)env->active_tc.PC;
1203
1204 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
1205
1206 if (ret < 0) {
1207 return ret;
1208 }
1209
1210 ret = kvm_mips_put_cp0_registers(cs, level);
1211 if (ret < 0) {
1212 return ret;
1213 }
1214
1215 ret = kvm_mips_put_fpu_registers(cs, level);
1216 if (ret < 0) {
1217 return ret;
1218 }
1219
1220 return ret;
1221 }
1222
1223 int kvm_arch_get_registers(CPUState *cs)
1224 {
1225 MIPSCPU *cpu = MIPS_CPU(cs);
1226 CPUMIPSState *env = &cpu->env;
1227 int ret = 0;
1228 struct kvm_regs regs;
1229 int i;
1230
1231 /* Get the current register set as KVM seems it */
1232 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1233
1234 if (ret < 0) {
1235 return ret;
1236 }
1237
1238 for (i = 0; i < 32; i++) {
1239 env->active_tc.gpr[i] = regs.gpr[i];
1240 }
1241
1242 env->active_tc.HI[0] = regs.hi;
1243 env->active_tc.LO[0] = regs.lo;
1244 env->active_tc.PC = regs.pc;
1245
1246 kvm_mips_get_cp0_registers(cs);
1247 kvm_mips_get_fpu_registers(cs);
1248
1249 return ret;
1250 }
1251
/*
 * MSI routing fixup hook required by the generic KVM layer.  No-op on
 * MIPS: the route entry is left untouched and success is reported.
 * NOTE(review): presumably MSI routing is unused on this target —
 * confirm against the generic kvm-all.c callers.
 */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
1257
/*
 * Post-add hook for MSI routes required by the generic KVM layer.
 * No-op on MIPS; always reports success.
 */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
1263
/*
 * Hook invoked after a virtual IRQ is released.  No per-arch cleanup is
 * needed on MIPS; always reports success.
 */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
1268
/*
 * Translate MSI data to a GSI number.  Deliberately unreachable on
 * MIPS: this target provides no MSI-to-GSI mapping, so reaching here
 * indicates a programming error and the process is aborted.
 */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}