/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
18 #include <sys/types.h>
19 #include <sys/ioctl.h>
22 #include <linux/kvm.h>
24 #include "qemu-common.h"
25 #include "qemu-timer.h"
30 #include "device_tree.h"
33 #include "hw/sysbus.h"
35 #include "hw/spapr_vio.h"
/* Debug printout helper: compiled away unless DEBUG_KVM is defined.
 * Without the #ifdef guard the two definitions below would conflict. */
/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
47 #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
49 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
/* Host KVM capability flags, probed once in kvm_arch_init(). */
static int cap_interrupt_unset = false; /* KVM_CAP_PPC_UNSET_IRQ */
static int cap_interrupt_level = false; /* KVM_CAP_PPC_IRQ_LEVEL */
static int cap_segstate;                /* KVM_CAP_PPC_SEGSTATE (BookS sregs) */
static int cap_booke_sregs;             /* KVM_CAP_PPC_BOOKE_SREGS */
static int cap_ppc_smt;                 /* threads per core, 0 if unsupported */
static int cap_ppc_rma;                 /* 0: none, 1: optional, 2: required */
60 /* XXX We have a race condition where we actually have a level triggered
61 * interrupt, but the infrastructure can't expose that yet, so the guest
62 * takes but ignores it, goes to sleep and never gets notified that there's
63 * still an interrupt pending.
65 * As a quick workaround, let's just wake up again 20 ms after we injected
66 * an interrupt. That way we can assure that we're always reinjecting
67 * interrupts in case the guest swallowed them.
69 static QEMUTimer
*idle_timer
;
/* Timer callback: kick the vcpu so it re-evaluates pending interrupts. */
static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}
76 int kvm_arch_init(KVMState
*s
)
78 cap_interrupt_unset
= kvm_check_extension(s
, KVM_CAP_PPC_UNSET_IRQ
);
79 cap_interrupt_level
= kvm_check_extension(s
, KVM_CAP_PPC_IRQ_LEVEL
);
80 cap_segstate
= kvm_check_extension(s
, KVM_CAP_PPC_SEGSTATE
);
81 cap_booke_sregs
= kvm_check_extension(s
, KVM_CAP_PPC_BOOKE_SREGS
);
82 cap_ppc_smt
= kvm_check_extension(s
, KVM_CAP_PPC_SMT
);
83 cap_ppc_rma
= kvm_check_extension(s
, KVM_CAP_PPC_RMA
);
85 if (!cap_interrupt_level
) {
86 fprintf(stderr
, "KVM: Couldn't find level irq capability. Expect the "
87 "VM to stall at times!\n");
93 static int kvm_arch_sync_sregs(CPUState
*cenv
)
95 struct kvm_sregs sregs
;
98 if (cenv
->excp_model
== POWERPC_EXCP_BOOKE
) {
99 /* What we're really trying to say is "if we're on BookE, we use
100 the native PVR for now". This is the only sane way to check
101 it though, so we potentially confuse users that they can run
102 BookE guests on BookS. Let's hope nobody dares enough :) */
106 fprintf(stderr
, "kvm error: missing PVR setting capability\n");
111 ret
= kvm_vcpu_ioctl(cenv
, KVM_GET_SREGS
, &sregs
);
116 sregs
.pvr
= cenv
->spr
[SPR_PVR
];
117 return kvm_vcpu_ioctl(cenv
, KVM_SET_SREGS
, &sregs
);
120 /* Set up a shared TLB array with KVM */
121 static int kvm_booke206_tlb_init(CPUState
*env
)
123 struct kvm_book3e_206_tlb_params params
= {};
124 struct kvm_config_tlb cfg
= {};
125 struct kvm_enable_cap encap
= {};
126 unsigned int entries
= 0;
129 if (!kvm_enabled() ||
130 !kvm_check_extension(env
->kvm_state
, KVM_CAP_SW_TLB
)) {
134 assert(ARRAY_SIZE(params
.tlb_sizes
) == BOOKE206_MAX_TLBN
);
136 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
137 params
.tlb_sizes
[i
] = booke206_tlb_size(env
, i
);
138 params
.tlb_ways
[i
] = booke206_tlb_ways(env
, i
);
139 entries
+= params
.tlb_sizes
[i
];
142 assert(entries
== env
->nb_tlb
);
143 assert(sizeof(struct kvm_book3e_206_tlb_entry
) == sizeof(ppcmas_tlb_t
));
145 env
->tlb_dirty
= true;
147 cfg
.array
= (uintptr_t)env
->tlb
.tlbm
;
148 cfg
.array_len
= sizeof(ppcmas_tlb_t
) * entries
;
149 cfg
.params
= (uintptr_t)¶ms
;
150 cfg
.mmu_type
= KVM_MMU_FSL_BOOKE_NOHV
;
152 encap
.cap
= KVM_CAP_SW_TLB
;
153 encap
.args
[0] = (uintptr_t)&cfg
;
155 ret
= kvm_vcpu_ioctl(env
, KVM_ENABLE_CAP
, &encap
);
157 fprintf(stderr
, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
158 __func__
, strerror(-ret
));
162 env
->kvm_sw_tlb
= true;
166 int kvm_arch_init_vcpu(CPUState
*cenv
)
170 ret
= kvm_arch_sync_sregs(cenv
);
175 idle_timer
= qemu_new_timer_ns(vm_clock
, kvm_kick_env
, cenv
);
177 /* Some targets support access to KVM's guest TLB. */
178 switch (cenv
->mmu_model
) {
179 case POWERPC_MMU_BOOKE206
:
180 ret
= kvm_booke206_tlb_init(cenv
);
189 void kvm_arch_reset_vcpu(CPUState
*env
)
193 static void kvm_sw_tlb_put(CPUState
*env
)
195 struct kvm_dirty_tlb dirty_tlb
;
196 unsigned char *bitmap
;
199 if (!env
->kvm_sw_tlb
) {
203 bitmap
= g_malloc((env
->nb_tlb
+ 7) / 8);
204 memset(bitmap
, 0xFF, (env
->nb_tlb
+ 7) / 8);
206 dirty_tlb
.bitmap
= (uintptr_t)bitmap
;
207 dirty_tlb
.num_dirty
= env
->nb_tlb
;
209 ret
= kvm_vcpu_ioctl(env
, KVM_DIRTY_TLB
, &dirty_tlb
);
211 fprintf(stderr
, "%s: KVM_DIRTY_TLB: %s\n",
212 __func__
, strerror(-ret
));
218 int kvm_arch_put_registers(CPUState
*env
, int level
)
220 struct kvm_regs regs
;
224 ret
= kvm_vcpu_ioctl(env
, KVM_GET_REGS
, ®s
);
234 regs
.srr0
= env
->spr
[SPR_SRR0
];
235 regs
.srr1
= env
->spr
[SPR_SRR1
];
237 regs
.sprg0
= env
->spr
[SPR_SPRG0
];
238 regs
.sprg1
= env
->spr
[SPR_SPRG1
];
239 regs
.sprg2
= env
->spr
[SPR_SPRG2
];
240 regs
.sprg3
= env
->spr
[SPR_SPRG3
];
241 regs
.sprg4
= env
->spr
[SPR_SPRG4
];
242 regs
.sprg5
= env
->spr
[SPR_SPRG5
];
243 regs
.sprg6
= env
->spr
[SPR_SPRG6
];
244 regs
.sprg7
= env
->spr
[SPR_SPRG7
];
246 regs
.pid
= env
->spr
[SPR_BOOKE_PID
];
248 for (i
= 0;i
< 32; i
++)
249 regs
.gpr
[i
] = env
->gpr
[i
];
251 ret
= kvm_vcpu_ioctl(env
, KVM_SET_REGS
, ®s
);
255 if (env
->tlb_dirty
) {
257 env
->tlb_dirty
= false;
263 int kvm_arch_get_registers(CPUState
*env
)
265 struct kvm_regs regs
;
266 struct kvm_sregs sregs
;
270 ret
= kvm_vcpu_ioctl(env
, KVM_GET_REGS
, ®s
);
275 for (i
= 7; i
>= 0; i
--) {
276 env
->crf
[i
] = cr
& 15;
286 env
->spr
[SPR_SRR0
] = regs
.srr0
;
287 env
->spr
[SPR_SRR1
] = regs
.srr1
;
289 env
->spr
[SPR_SPRG0
] = regs
.sprg0
;
290 env
->spr
[SPR_SPRG1
] = regs
.sprg1
;
291 env
->spr
[SPR_SPRG2
] = regs
.sprg2
;
292 env
->spr
[SPR_SPRG3
] = regs
.sprg3
;
293 env
->spr
[SPR_SPRG4
] = regs
.sprg4
;
294 env
->spr
[SPR_SPRG5
] = regs
.sprg5
;
295 env
->spr
[SPR_SPRG6
] = regs
.sprg6
;
296 env
->spr
[SPR_SPRG7
] = regs
.sprg7
;
298 env
->spr
[SPR_BOOKE_PID
] = regs
.pid
;
300 for (i
= 0;i
< 32; i
++)
301 env
->gpr
[i
] = regs
.gpr
[i
];
303 if (cap_booke_sregs
) {
304 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
309 if (sregs
.u
.e
.features
& KVM_SREGS_E_BASE
) {
310 env
->spr
[SPR_BOOKE_CSRR0
] = sregs
.u
.e
.csrr0
;
311 env
->spr
[SPR_BOOKE_CSRR1
] = sregs
.u
.e
.csrr1
;
312 env
->spr
[SPR_BOOKE_ESR
] = sregs
.u
.e
.esr
;
313 env
->spr
[SPR_BOOKE_DEAR
] = sregs
.u
.e
.dear
;
314 env
->spr
[SPR_BOOKE_MCSR
] = sregs
.u
.e
.mcsr
;
315 env
->spr
[SPR_BOOKE_TSR
] = sregs
.u
.e
.tsr
;
316 env
->spr
[SPR_BOOKE_TCR
] = sregs
.u
.e
.tcr
;
317 env
->spr
[SPR_DECR
] = sregs
.u
.e
.dec
;
318 env
->spr
[SPR_TBL
] = sregs
.u
.e
.tb
& 0xffffffff;
319 env
->spr
[SPR_TBU
] = sregs
.u
.e
.tb
>> 32;
320 env
->spr
[SPR_VRSAVE
] = sregs
.u
.e
.vrsave
;
323 if (sregs
.u
.e
.features
& KVM_SREGS_E_ARCH206
) {
324 env
->spr
[SPR_BOOKE_PIR
] = sregs
.u
.e
.pir
;
325 env
->spr
[SPR_BOOKE_MCSRR0
] = sregs
.u
.e
.mcsrr0
;
326 env
->spr
[SPR_BOOKE_MCSRR1
] = sregs
.u
.e
.mcsrr1
;
327 env
->spr
[SPR_BOOKE_DECAR
] = sregs
.u
.e
.decar
;
328 env
->spr
[SPR_BOOKE_IVPR
] = sregs
.u
.e
.ivpr
;
331 if (sregs
.u
.e
.features
& KVM_SREGS_E_64
) {
332 env
->spr
[SPR_BOOKE_EPCR
] = sregs
.u
.e
.epcr
;
335 if (sregs
.u
.e
.features
& KVM_SREGS_E_SPRG8
) {
336 env
->spr
[SPR_BOOKE_SPRG8
] = sregs
.u
.e
.sprg8
;
339 if (sregs
.u
.e
.features
& KVM_SREGS_E_IVOR
) {
340 env
->spr
[SPR_BOOKE_IVOR0
] = sregs
.u
.e
.ivor_low
[0];
341 env
->spr
[SPR_BOOKE_IVOR1
] = sregs
.u
.e
.ivor_low
[1];
342 env
->spr
[SPR_BOOKE_IVOR2
] = sregs
.u
.e
.ivor_low
[2];
343 env
->spr
[SPR_BOOKE_IVOR3
] = sregs
.u
.e
.ivor_low
[3];
344 env
->spr
[SPR_BOOKE_IVOR4
] = sregs
.u
.e
.ivor_low
[4];
345 env
->spr
[SPR_BOOKE_IVOR5
] = sregs
.u
.e
.ivor_low
[5];
346 env
->spr
[SPR_BOOKE_IVOR6
] = sregs
.u
.e
.ivor_low
[6];
347 env
->spr
[SPR_BOOKE_IVOR7
] = sregs
.u
.e
.ivor_low
[7];
348 env
->spr
[SPR_BOOKE_IVOR8
] = sregs
.u
.e
.ivor_low
[8];
349 env
->spr
[SPR_BOOKE_IVOR9
] = sregs
.u
.e
.ivor_low
[9];
350 env
->spr
[SPR_BOOKE_IVOR10
] = sregs
.u
.e
.ivor_low
[10];
351 env
->spr
[SPR_BOOKE_IVOR11
] = sregs
.u
.e
.ivor_low
[11];
352 env
->spr
[SPR_BOOKE_IVOR12
] = sregs
.u
.e
.ivor_low
[12];
353 env
->spr
[SPR_BOOKE_IVOR13
] = sregs
.u
.e
.ivor_low
[13];
354 env
->spr
[SPR_BOOKE_IVOR14
] = sregs
.u
.e
.ivor_low
[14];
355 env
->spr
[SPR_BOOKE_IVOR15
] = sregs
.u
.e
.ivor_low
[15];
357 if (sregs
.u
.e
.features
& KVM_SREGS_E_SPE
) {
358 env
->spr
[SPR_BOOKE_IVOR32
] = sregs
.u
.e
.ivor_high
[0];
359 env
->spr
[SPR_BOOKE_IVOR33
] = sregs
.u
.e
.ivor_high
[1];
360 env
->spr
[SPR_BOOKE_IVOR34
] = sregs
.u
.e
.ivor_high
[2];
363 if (sregs
.u
.e
.features
& KVM_SREGS_E_PM
) {
364 env
->spr
[SPR_BOOKE_IVOR35
] = sregs
.u
.e
.ivor_high
[3];
367 if (sregs
.u
.e
.features
& KVM_SREGS_E_PC
) {
368 env
->spr
[SPR_BOOKE_IVOR36
] = sregs
.u
.e
.ivor_high
[4];
369 env
->spr
[SPR_BOOKE_IVOR37
] = sregs
.u
.e
.ivor_high
[5];
373 if (sregs
.u
.e
.features
& KVM_SREGS_E_ARCH206_MMU
) {
374 env
->spr
[SPR_BOOKE_MAS0
] = sregs
.u
.e
.mas0
;
375 env
->spr
[SPR_BOOKE_MAS1
] = sregs
.u
.e
.mas1
;
376 env
->spr
[SPR_BOOKE_MAS2
] = sregs
.u
.e
.mas2
;
377 env
->spr
[SPR_BOOKE_MAS3
] = sregs
.u
.e
.mas7_3
& 0xffffffff;
378 env
->spr
[SPR_BOOKE_MAS4
] = sregs
.u
.e
.mas4
;
379 env
->spr
[SPR_BOOKE_MAS6
] = sregs
.u
.e
.mas6
;
380 env
->spr
[SPR_BOOKE_MAS7
] = sregs
.u
.e
.mas7_3
>> 32;
381 env
->spr
[SPR_MMUCFG
] = sregs
.u
.e
.mmucfg
;
382 env
->spr
[SPR_BOOKE_TLB0CFG
] = sregs
.u
.e
.tlbcfg
[0];
383 env
->spr
[SPR_BOOKE_TLB1CFG
] = sregs
.u
.e
.tlbcfg
[1];
386 if (sregs
.u
.e
.features
& KVM_SREGS_EXP
) {
387 env
->spr
[SPR_BOOKE_EPR
] = sregs
.u
.e
.epr
;
390 if (sregs
.u
.e
.features
& KVM_SREGS_E_PD
) {
391 env
->spr
[SPR_BOOKE_EPLC
] = sregs
.u
.e
.eplc
;
392 env
->spr
[SPR_BOOKE_EPSC
] = sregs
.u
.e
.epsc
;
395 if (sregs
.u
.e
.impl_id
== KVM_SREGS_E_IMPL_FSL
) {
396 env
->spr
[SPR_E500_SVR
] = sregs
.u
.e
.impl
.fsl
.svr
;
397 env
->spr
[SPR_Exxx_MCAR
] = sregs
.u
.e
.impl
.fsl
.mcar
;
398 env
->spr
[SPR_HID0
] = sregs
.u
.e
.impl
.fsl
.hid0
;
400 if (sregs
.u
.e
.impl
.fsl
.features
& KVM_SREGS_E_FSL_PIDn
) {
401 env
->spr
[SPR_BOOKE_PID1
] = sregs
.u
.e
.impl
.fsl
.pid1
;
402 env
->spr
[SPR_BOOKE_PID2
] = sregs
.u
.e
.impl
.fsl
.pid2
;
408 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
413 ppc_store_sdr1(env
, sregs
.u
.s
.sdr1
);
417 for (i
= 0; i
< 64; i
++) {
418 ppc_store_slb(env
, sregs
.u
.s
.ppc64
.slb
[i
].slbe
,
419 sregs
.u
.s
.ppc64
.slb
[i
].slbv
);
424 for (i
= 0; i
< 16; i
++) {
425 env
->sr
[i
] = sregs
.u
.s
.ppc32
.sr
[i
];
429 for (i
= 0; i
< 8; i
++) {
430 env
->DBAT
[0][i
] = sregs
.u
.s
.ppc32
.dbat
[i
] & 0xffffffff;
431 env
->DBAT
[1][i
] = sregs
.u
.s
.ppc32
.dbat
[i
] >> 32;
432 env
->IBAT
[0][i
] = sregs
.u
.s
.ppc32
.ibat
[i
] & 0xffffffff;
433 env
->IBAT
[1][i
] = sregs
.u
.s
.ppc32
.ibat
[i
] >> 32;
440 int kvmppc_set_interrupt(CPUState
*env
, int irq
, int level
)
442 unsigned virq
= level
? KVM_INTERRUPT_SET_LEVEL
: KVM_INTERRUPT_UNSET
;
444 if (irq
!= PPC_INTERRUPT_EXT
) {
448 if (!kvm_enabled() || !cap_interrupt_unset
|| !cap_interrupt_level
) {
452 kvm_vcpu_ioctl(env
, KVM_INTERRUPT
, &virq
);
/* Pick the external-interrupt input pin matching the emulated core family. */
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
465 void kvm_arch_pre_run(CPUState
*env
, struct kvm_run
*run
)
470 /* PowerPC Qemu tracks the various core input pins (interrupt, critical
471 * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
472 if (!cap_interrupt_level
&&
473 run
->ready_for_interrupt_injection
&&
474 (env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
475 (env
->irq_input_state
& (1<<PPC_INPUT_INT
)))
477 /* For now KVM disregards the 'irq' argument. However, in the
478 * future KVM could cache it in-kernel to avoid a heavyweight exit
479 * when reading the UIC.
481 irq
= KVM_INTERRUPT_SET
;
483 dprintf("injected interrupt %d\n", irq
);
484 r
= kvm_vcpu_ioctl(env
, KVM_INTERRUPT
, &irq
);
486 printf("cpu %d fail inject %x\n", env
->cpu_index
, irq
);
488 /* Always wake up soon in case the interrupt was level based */
489 qemu_mod_timer(idle_timer
, qemu_get_clock_ns(vm_clock
) +
490 (get_ticks_per_sec() / 50));
493 /* We don't know if there are more interrupts pending after this. However,
494 * the guest will return to userspace in the course of handling this one
495 * anyways, so we will get a chance to deliver the rest. */
498 void kvm_arch_post_run(CPUState
*env
, struct kvm_run
*run
)
502 int kvm_arch_process_async_events(CPUState
*env
)
507 static int kvmppc_handle_halt(CPUState
*env
)
509 if (!(env
->interrupt_request
& CPU_INTERRUPT_HARD
) && (msr_ee
)) {
511 env
->exception_index
= EXCP_HLT
;
517 /* map dcr access to existing qemu dcr emulation */
518 static int kvmppc_handle_dcr_read(CPUState
*env
, uint32_t dcrn
, uint32_t *data
)
520 if (ppc_dcr_read(env
->dcr_env
, dcrn
, data
) < 0)
521 fprintf(stderr
, "Read to unhandled DCR (0x%x)\n", dcrn
);
526 static int kvmppc_handle_dcr_write(CPUState
*env
, uint32_t dcrn
, uint32_t data
)
528 if (ppc_dcr_write(env
->dcr_env
, dcrn
, data
) < 0)
529 fprintf(stderr
, "Write to unhandled DCR (0x%x)\n", dcrn
);
534 int kvm_arch_handle_exit(CPUState
*env
, struct kvm_run
*run
)
538 switch (run
->exit_reason
) {
540 if (run
->dcr
.is_write
) {
541 dprintf("handle dcr write\n");
542 ret
= kvmppc_handle_dcr_write(env
, run
->dcr
.dcrn
, run
->dcr
.data
);
544 dprintf("handle dcr read\n");
545 ret
= kvmppc_handle_dcr_read(env
, run
->dcr
.dcrn
, &run
->dcr
.data
);
549 dprintf("handle halt\n");
550 ret
= kvmppc_handle_halt(env
);
552 #ifdef CONFIG_PSERIES
553 case KVM_EXIT_PAPR_HCALL
:
554 dprintf("handle PAPR hypercall\n");
555 run
->papr_hcall
.ret
= spapr_hypercall(env
, run
->papr_hcall
.nr
,
556 run
->papr_hcall
.args
);
561 fprintf(stderr
, "KVM: unknown exit reason %d\n", run
->exit_reason
);
/* Scan /proc/cpuinfo for a line starting with @field and copy that line
 * into @value (at most @len bytes, always NUL-terminated).
 *
 * Returns 0 when the field was found, -1 otherwise (including when
 * /proc/cpuinfo cannot be opened).
 */
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            /* strncpy does not NUL-terminate when it truncates; force a
             * terminator so callers can safely run strchr()/atoi() on
             * the result. */
            strncpy(value, line, len);
            if (len > 0) {
                value[len - 1] = '\0';
            }
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}
/* Return the host timebase frequency from /proc/cpuinfo, falling back to
 * the emulated default tick rate when it cannot be determined. */
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}
617 /* Try to find a device tree node for a CPU with clock-frequency property */
618 static int kvmppc_find_cpu_dt(char *buf
, int buf_len
)
623 if ((dp
= opendir(PROC_DEVTREE_CPU
)) == NULL
) {
624 printf("Can't open directory " PROC_DEVTREE_CPU
"\n");
629 while ((dirp
= readdir(dp
)) != NULL
) {
631 snprintf(buf
, buf_len
, "%s%s/clock-frequency", PROC_DEVTREE_CPU
,
635 snprintf(buf
, buf_len
, "%s%s", PROC_DEVTREE_CPU
, dirp
->d_name
);
642 if (buf
[0] == '\0') {
643 printf("Unknown host!\n");
/* Read the host CPU clock frequency from the device tree.
 *
 * The property is either one or two big-endian 32-bit cells. Returns the
 * frequency in Hz, or 0 when no CPU node with a readable clock-frequency
 * property exists (previously an fopen failure returned (uint64_t)-1,
 * which callers would have treated as an absurd frequency).
 */
uint64_t kvmppc_get_clockfreq(void)
{
    char buf[512];
    uint32_t tb[2];
    uint64_t freq;
    FILE *f;
    int len;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return 0;
    }

    /* The extra -1 reserves space for the terminating NUL; a plain
     * "sizeof(buf) - strlen(buf)" bound lets strncat write one byte past
     * the end of buf when the concatenation exactly fills it. */
    strncat(buf, "/clock-frequency", sizeof(buf) - strlen(buf) - 1);

    f = fopen(buf, "rb");
    if (!f) {
        return 0;
    }

    len = fread(tb, sizeof(tb[0]), 2, f);
    fclose(f);
    switch (len) {
    case 1:
        /* freq is only a single cell */
        return tb[0];
    case 2:
        /* Two cells: reassemble via memcpy to avoid strict-aliasing UB
         * that *(uint64_t *)tb would incur. Byte order is unchanged. */
        memcpy(&freq, tb, sizeof(freq));
        return freq;
    }

    return 0;
}
681 int kvmppc_get_hypercall(CPUState
*env
, uint8_t *buf
, int buf_len
)
683 uint32_t *hc
= (uint32_t*)buf
;
685 struct kvm_ppc_pvinfo pvinfo
;
687 if (kvm_check_extension(env
->kvm_state
, KVM_CAP_PPC_GET_PVINFO
) &&
688 !kvm_vm_ioctl(env
->kvm_state
, KVM_PPC_GET_PVINFO
, &pvinfo
)) {
689 memcpy(buf
, pvinfo
.hcall
, buf_len
);
695 * Fallback to always fail hypercalls:
711 void kvmppc_set_papr(CPUState
*env
)
713 struct kvm_enable_cap cap
= {};
714 struct kvm_one_reg reg
= {};
715 struct kvm_sregs sregs
= {};
718 cap
.cap
= KVM_CAP_PPC_PAPR
;
719 ret
= kvm_vcpu_ioctl(env
, KVM_ENABLE_CAP
, &cap
);
726 * XXX We set HIOR here. It really should be a qdev property of
727 * the CPU node, but we don't have CPUs converted to qdev yet.
729 * Once we have qdev CPUs, move HIOR to a qdev property and
732 reg
.id
= KVM_ONE_REG_PPC_HIOR
;
733 reg
.u
.reg64
= env
->spr
[SPR_HIOR
];
734 ret
= kvm_vcpu_ioctl(env
, KVM_SET_ONE_REG
, ®
);
739 /* Set SDR1 so kernel space finds the HTAB */
740 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
745 sregs
.u
.s
.sdr1
= env
->spr
[SPR_SDR1
];
747 ret
= kvm_vcpu_ioctl(env
, KVM_SET_SREGS
, &sregs
);
755 cpu_abort(env
, "This KVM version does not support PAPR\n");
758 int kvmppc_smt_threads(void)
760 return cap_ppc_smt
? cap_ppc_smt
: 1;
763 off_t
kvmppc_alloc_rma(const char *name
, MemoryRegion
*sysmem
)
768 struct kvm_allocate_rma ret
;
769 MemoryRegion
*rma_region
;
771 /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
772 * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
773 * not necessary on this hardware
774 * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
776 * FIXME: We should allow the user to force contiguous RMA
777 * allocation in the cap_ppc_rma==1 case.
779 if (cap_ppc_rma
< 2) {
783 fd
= kvm_vm_ioctl(kvm_state
, KVM_ALLOCATE_RMA
, &ret
);
785 fprintf(stderr
, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
790 size
= MIN(ret
.rma_size
, 256ul << 20);
792 rma
= mmap(NULL
, size
, PROT_READ
|PROT_WRITE
, MAP_SHARED
, fd
, 0);
793 if (rma
== MAP_FAILED
) {
794 fprintf(stderr
, "KVM: Error mapping RMA: %s\n", strerror(errno
));
798 rma_region
= g_new(MemoryRegion
, 1);
799 memory_region_init_ram_ptr(rma_region
, NULL
, name
, size
, rma
);
800 memory_region_add_subregion(sysmem
, 0, rma_region
);
805 bool kvm_arch_stop_on_emulation_error(CPUState
*env
)
810 int kvm_arch_on_sigbus_vcpu(CPUState
*env
, int code
, void *addr
)
815 int kvm_arch_on_sigbus(int code
, void *addr
)