qemu.git: target-ppc/kvm.c (at commit "monitor: add PPC BookE SPRs")
/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "sysemu.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "device_tree.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
#ifdef KVM_CAP_PPC_BOOKE_SREGS
static int cap_booke_sregs;
#endif

/* XXX We have a race condition where we actually have a level triggered
 * interrupt, but the infrastructure can't expose that yet, so the guest
 * takes but ignores it, goes to sleep and never gets notified that there's
 * still an interrupt pending.
 *
 * As a quick workaround, let's just wake up again 20 ms after we injected
 * an interrupt. That way we can assure that we're always reinjecting
 * interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}

int kvm_arch_init(KVMState *s)
{
#ifdef KVM_CAP_PPC_UNSET_IRQ
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
#endif
#ifdef KVM_CAP_PPC_IRQ_LEVEL
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
#endif
#ifdef KVM_CAP_PPC_SEGSTATE
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
#endif
#ifdef KVM_CAP_PPC_BOOKE_SREGS
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
#endif

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}

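/* Sync the guest-visible PVR into the in-kernel sregs, so the kernel knows
 * which CPU the guest expects to see.  BookE is skipped here (presumably the
 * host PVR is simply used as-is), as are kernels that don't advertise
 * KVM_CAP_PPC_SEGSTATE. */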
static int kvm_arch_sync_sregs(CPUState *cenv)
{
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        return 0;
    } else {
        if (!cap_segstate) {
            return 0;
        }
    }

    ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
}

int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret;

    ret = kvm_arch_sync_sregs(cenv);
    if (ret) {
        return ret;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
}

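/* Copy QEMU's register state into the kernel.  The current regs are fetched
 * with KVM_GET_REGS first so that any fields we don't track here keep their
 * in-kernel values; we then overwrite the ones we do track and push the
 * whole struct back with KVM_SET_REGS. */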
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    return ret;
}

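/* The reverse direction: pull the kernel's register (and, where available,
 * sregs) state back into CPUState.  Note that the packed 32-bit CR is split
 * into the eight 4-bit crf[] fields QEMU uses internally. */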
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++)
        env->gpr[i] = regs.gpr[i];

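    /* BookE: the extended sregs layout is feature-flagged, so only copy back
     * the groups the kernel says it actually filled in (base, ISA 2.06,
     * 64-bit, IVORs, MMU, Freescale-specific registers, ...). */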
#ifdef KVM_CAP_PPC_BOOKE_SREGS
    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }
#endif

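    /* Book3S (segmented MMU): sync SDR1, the SLB (64-bit only), the sixteen
     * segment registers and the BATs.  Each BAT comes packed in a single
     * 64-bit value that is split back into the two 32-bit halves QEMU
     * tracks. */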
#ifdef KVM_CAP_PPC_SEGSTATE
    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }
#endif

    return 0;
}

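/* Raise or clear the external interrupt pin in the kernel.  Only
 * PPC_INTERRUPT_EXT is handled here, and only when the kernel supports
 * level-triggered set/unset; otherwise injection is left to the polling
 * fallback in kvm_arch_pre_run() below. */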
int kvmppc_set_interrupt(CPUState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}

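/* Which core input pin carries the external interrupt depends on the CPU
 * family this binary targets; kvm_arch_pre_run() tests this bit of
 * irq_input_state when it has to inject interrupts by hand. */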
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0)
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyway, so we will get a chance to deliver the rest. */
}

void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *env)
{
    return 0;
}

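/* The guest went idle (KVM_EXIT_HLT).  Only halt the vcpu if external
 * interrupts are enabled (MSR[EE]) and nothing is pending; otherwise drop
 * straight back into the guest so it can take the pending interrupt. */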
static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}

/* Map DCR accesses onto QEMU's existing DCR emulation */
static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

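/* Scan /proc/cpuinfo for a line starting with 'field' and copy the whole
 * matching line (key, colon and all) into 'value'.  Returns 0 on a match,
 * -1 otherwise. */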
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

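/* Host timebase frequency, parsed from the "timebase : <ticks>" line in
 * /proc/cpuinfo.  Falls back to get_ticks_per_sec() if the line is missing
 * or malformed. */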
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}

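/* Ask the kernel (KVM_PPC_GET_PVINFO) for its preferred hypercall
 * instruction sequence and copy it into 'buf'.  If the capability is
 * absent, emit a stub that simply fails every hypercall:
 * "li r3, -1" followed by three nops. */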
int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t*)buf;

#ifdef KVM_CAP_PPC_GET_PVINFO
    struct kvm_ppc_pvinfo pvinfo;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);

        return 0;
    }
#endif

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */

    hc[0] = 0x3860ffff;
    hc[1] = 0x60000000;
    hc[2] = 0x60000000;
    hc[3] = 0x60000000;

    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}