/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
*/

#include "qemu/osdep.h"

#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
#include "exec/memattrs.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
            ret = 0;
        }
    }

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
            ret = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return ret;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        fprintf(stderr, "Warning: KVM does not support FPU, disabling\n");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        fprintf(stderr, "Warning: KVM does not support MSA, disabling\n");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

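/*
 * I/O interrupts are delivered to the guest on hardware interrupt line 0,
 * which corresponds to bit IP2 of CP0_Cause (CP0Ca_IP is the base of the IP
 * field, so IP2 is bit CP0Ca_IP + 2); kvm_arch_pre_run() below injects IRQ 2
 * accordingly.
 */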
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}


void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;

    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    qemu_mutex_unlock_iothread();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    DPRINTF("%s\n", __func__);
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    DPRINTF("%s\n", __func__);
    return 1;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

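/*
 * Interrupts are raised and lowered with the KVM_INTERRUPT vcpu ioctl: a
 * negative intr.irq deasserts the line instead of asserting it, and
 * intr.cpu = -1 targets the vcpu the ioctl is issued on, whereas an IPI
 * names the destination vcpu by index (see kvm_mips_set_ipi_interrupt below).
 */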
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

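/*
 * KVM identifies coprocessor 0 registers by their (register, select) pair
 * packed into a ONE_REG id: KVM_REG_MIPS_CP0 | size flag | (8 * reg + sel).
 */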
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX      MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT    MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL  MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK   MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED      MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA     MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR   MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT      MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI    MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE    MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS     MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE      MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC        MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID       MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG     MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1    MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2    MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3    MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4    MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5    MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC   MIPS_CP0_64(30, 0)

static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

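/*
 * The "ulreg" accessors handle registers that are 64 bits wide on the KVM
 * side (MIPS_CP0_64) but held in a target_ulong in QEMU, which may be only
 * 32 bits, so the value is staged through a uint64_t temporary in both
 * directions.
 */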
static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))

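/*
 * Update only the bits of a CP0 register covered by @mask: read the current
 * value from KVM, flip the masked bits that differ from QEMU's view, and
 * write the register back only if something actually changed.
 */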
static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int err;
    int32_t tmp, change;

    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;
    if (!change) {
        return 0;
    }

    tmp = tmp ^ change;
    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}

/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

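/*
 * Freezing is done by setting the DC (disable counting) bit in the
 * KVM_REG_MIPS_COUNT_CTL register; when the VM clock restarts,
 * KVM_REG_MIPS_COUNT_RESUME is loaded with the current host time, which KVM
 * uses as the point from which counting should appear to resume.
 */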
/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->kvm_vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed saving count\n");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            fprintf(stderr, "Failed setting COUNT_RESUME\n");
            return;
        }

        if (!cs->kvm_vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed restoring count\n");
            }
        }
    }
}

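/*
 * FPU and MSA register state is only transferred when the guest CPU model
 * advertises the corresponding feature (Config1.FP, Config3.MSAP). With MSA
 * present, the FPRs are accessed through the 128-bit vector registers, of
 * which they are a subset; otherwise CP0_Status.FR selects whether the FPRs
 * are transferred as 64-bit or 32-bit registers.
 */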
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}

static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}


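/*
 * Transfer the CP0 state that QEMU models to KVM. Most registers are copied
 * directly; the Config registers are only changed within the writable masks
 * defined above, and CP0_Count/CP0_Cause go through kvm_mips_restore_count()
 * so the KVM timer stays consistent while the VM clock is running.
 */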
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

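/*
 * GPRs, HI/LO and PC are always 64-bit fields in struct kvm_regs, so 32-bit
 * guest values are sign-extended through (target_long) before being handed
 * to KVM.
 */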
int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);

    return ret;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}