/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <errno.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/arm-misc.h"
25 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
29 int kvm_arch_init(KVMState
*s
)
31 /* For ARM interrupt delivery is always asynchronous,
32 * whether we are using an in-kernel VGIC or not.
34 kvm_async_interrupts_allowed
= true;
38 unsigned long kvm_arch_vcpu_id(CPUState
*cpu
)
40 return cpu
->cpu_index
;
43 int kvm_arch_init_vcpu(CPUState
*cs
)
45 struct kvm_vcpu_init init
;
50 init
.target
= KVM_ARM_TARGET_CORTEX_A15
;
51 memset(init
.features
, 0, sizeof(init
.features
));
52 ret
= kvm_vcpu_ioctl(cs
, KVM_ARM_VCPU_INIT
, &init
);
56 /* Query the kernel to make sure it supports 32 VFP
57 * registers: QEMU's "cortex-a15" CPU is always a
58 * VFP-D32 core. The simplest way to do this is just
59 * to attempt to read register d31.
61 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| KVM_REG_ARM_VFP
| 31;
62 r
.addr
= (uintptr_t)(&v
);
63 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
/* Table entry describing one register to copy between the kernel and
 * CPUARMState: the KVM_*ONE_REG id and the offset of the backing field
 * inside CPUARMState.
 */
typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

/* Entry for a 32-bit core register: KERNELNAME is the field name inside
 * struct kvm_regs, QEMUFIELD the corresponding CPUARMState member.
 */
#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

/* Entry for a 32-bit cp15 register, identified by CRn/CRm/opc1/opc2. */
#define CP15REG(CRN, CRM, OPC1, OPC2, QEMUFIELD) \
    {                                            \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |         \
        (15 << KVM_REG_ARM_COPROC_SHIFT) |       \
        ((CRN) << KVM_REG_ARM_32_CRN_SHIFT) |    \
        ((CRM) << KVM_REG_ARM_CRM_SHIFT) |       \
        ((OPC1) << KVM_REG_ARM_OPC1_SHIFT) |     \
        ((OPC2) << KVM_REG_ARM_32_OPC2_SHIFT),   \
        offsetof(CPUARMState, QEMUFIELD)         \
    }

/* Entry for a VFP system register R (FPSID, FPEXC, ...), which lives in
 * the vfp.xregs[] array on the QEMU side.
 */
#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }
100 static const Reg regs
[] = {
101 /* R0_usr .. R14_usr */
102 COREREG(usr_regs
.uregs
[0], regs
[0]),
103 COREREG(usr_regs
.uregs
[1], regs
[1]),
104 COREREG(usr_regs
.uregs
[2], regs
[2]),
105 COREREG(usr_regs
.uregs
[3], regs
[3]),
106 COREREG(usr_regs
.uregs
[4], regs
[4]),
107 COREREG(usr_regs
.uregs
[5], regs
[5]),
108 COREREG(usr_regs
.uregs
[6], regs
[6]),
109 COREREG(usr_regs
.uregs
[7], regs
[7]),
110 COREREG(usr_regs
.uregs
[8], usr_regs
[0]),
111 COREREG(usr_regs
.uregs
[9], usr_regs
[1]),
112 COREREG(usr_regs
.uregs
[10], usr_regs
[2]),
113 COREREG(usr_regs
.uregs
[11], usr_regs
[3]),
114 COREREG(usr_regs
.uregs
[12], usr_regs
[4]),
115 COREREG(usr_regs
.uregs
[13], banked_r13
[0]),
116 COREREG(usr_regs
.uregs
[14], banked_r14
[0]),
117 /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
118 COREREG(svc_regs
[0], banked_r13
[1]),
119 COREREG(svc_regs
[1], banked_r14
[1]),
120 COREREG(svc_regs
[2], banked_spsr
[1]),
121 COREREG(abt_regs
[0], banked_r13
[2]),
122 COREREG(abt_regs
[1], banked_r14
[2]),
123 COREREG(abt_regs
[2], banked_spsr
[2]),
124 COREREG(und_regs
[0], banked_r13
[3]),
125 COREREG(und_regs
[1], banked_r14
[3]),
126 COREREG(und_regs
[2], banked_spsr
[3]),
127 COREREG(irq_regs
[0], banked_r13
[4]),
128 COREREG(irq_regs
[1], banked_r14
[4]),
129 COREREG(irq_regs
[2], banked_spsr
[4]),
130 /* R8_fiq .. R14_fiq and SPSR_fiq */
131 COREREG(fiq_regs
[0], fiq_regs
[0]),
132 COREREG(fiq_regs
[1], fiq_regs
[1]),
133 COREREG(fiq_regs
[2], fiq_regs
[2]),
134 COREREG(fiq_regs
[3], fiq_regs
[3]),
135 COREREG(fiq_regs
[4], fiq_regs
[4]),
136 COREREG(fiq_regs
[5], banked_r13
[5]),
137 COREREG(fiq_regs
[6], banked_r14
[5]),
138 COREREG(fiq_regs
[7], banked_spsr
[5]),
140 COREREG(usr_regs
.uregs
[15], regs
[15]),
141 /* A non-comprehensive set of cp15 registers.
142 * TODO: drive this from the cp_regs hashtable instead.
144 CP15REG(1, 0, 0, 0, cp15
.c1_sys
), /* SCTLR */
145 CP15REG(2, 0, 0, 2, cp15
.c2_control
), /* TTBCR */
146 CP15REG(3, 0, 0, 0, cp15
.c3
), /* DACR */
147 /* VFP system registers */
156 int kvm_arch_put_registers(CPUState
*cs
, int level
)
158 ARMCPU
*cpu
= ARM_CPU(cs
);
159 CPUARMState
*env
= &cpu
->env
;
160 struct kvm_one_reg r
;
163 uint32_t cpsr
, fpscr
;
166 /* Make sure the banked regs are properly set */
167 mode
= env
->uncached_cpsr
& CPSR_M
;
168 bn
= bank_number(mode
);
169 if (mode
== ARM_CPU_MODE_FIQ
) {
170 memcpy(env
->fiq_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
172 memcpy(env
->usr_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
174 env
->banked_r13
[bn
] = env
->regs
[13];
175 env
->banked_r14
[bn
] = env
->regs
[14];
176 env
->banked_spsr
[bn
] = env
->spsr
;
178 /* Now we can safely copy stuff down to the kernel */
179 for (i
= 0; i
< ARRAY_SIZE(regs
); i
++) {
181 r
.addr
= (uintptr_t)(env
) + regs
[i
].offset
;
182 ret
= kvm_vcpu_ioctl(cs
, KVM_SET_ONE_REG
, &r
);
188 /* Special cases which aren't a single CPUARMState field */
189 cpsr
= cpsr_read(env
);
190 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U32
|
191 KVM_REG_ARM_CORE
| KVM_REG_ARM_CORE_REG(usr_regs
.ARM_cpsr
);
192 r
.addr
= (uintptr_t)(&cpsr
);
193 ret
= kvm_vcpu_ioctl(cs
, KVM_SET_ONE_REG
, &r
);
198 /* TTBR0: cp15 crm=2 opc1=0 */
199 ttbr
= ((uint64_t)env
->cp15
.c2_base0_hi
<< 32) | env
->cp15
.c2_base0
;
200 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| (15 << KVM_REG_ARM_COPROC_SHIFT
) |
201 (2 << KVM_REG_ARM_CRM_SHIFT
) | (0 << KVM_REG_ARM_OPC1_SHIFT
);
202 r
.addr
= (uintptr_t)(&ttbr
);
203 ret
= kvm_vcpu_ioctl(cs
, KVM_SET_ONE_REG
, &r
);
208 /* TTBR1: cp15 crm=2 opc1=1 */
209 ttbr
= ((uint64_t)env
->cp15
.c2_base1_hi
<< 32) | env
->cp15
.c2_base1
;
210 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| (15 << KVM_REG_ARM_COPROC_SHIFT
) |
211 (2 << KVM_REG_ARM_CRM_SHIFT
) | (1 << KVM_REG_ARM_OPC1_SHIFT
);
212 r
.addr
= (uintptr_t)(&ttbr
);
213 ret
= kvm_vcpu_ioctl(cs
, KVM_SET_ONE_REG
, &r
);
219 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| KVM_REG_ARM_VFP
;
220 for (i
= 0; i
< 32; i
++) {
221 r
.addr
= (uintptr_t)(&env
->vfp
.regs
[i
]);
222 ret
= kvm_vcpu_ioctl(cs
, KVM_SET_ONE_REG
, &r
);
229 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U32
| KVM_REG_ARM_VFP
|
230 KVM_REG_ARM_VFP_FPSCR
;
231 fpscr
= vfp_get_fpscr(env
);
232 r
.addr
= (uintptr_t)&fpscr
;
233 ret
= kvm_vcpu_ioctl(cs
, KVM_SET_ONE_REG
, &r
);
238 int kvm_arch_get_registers(CPUState
*cs
)
240 ARMCPU
*cpu
= ARM_CPU(cs
);
241 CPUARMState
*env
= &cpu
->env
;
242 struct kvm_one_reg r
;
245 uint32_t cpsr
, fpscr
;
248 for (i
= 0; i
< ARRAY_SIZE(regs
); i
++) {
250 r
.addr
= (uintptr_t)(env
) + regs
[i
].offset
;
251 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
257 /* Special cases which aren't a single CPUARMState field */
258 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U32
|
259 KVM_REG_ARM_CORE
| KVM_REG_ARM_CORE_REG(usr_regs
.ARM_cpsr
);
260 r
.addr
= (uintptr_t)(&cpsr
);
261 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
265 cpsr_write(env
, cpsr
, 0xffffffff);
267 /* TTBR0: cp15 crm=2 opc1=0 */
268 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| (15 << KVM_REG_ARM_COPROC_SHIFT
) |
269 (2 << KVM_REG_ARM_CRM_SHIFT
) | (0 << KVM_REG_ARM_OPC1_SHIFT
);
270 r
.addr
= (uintptr_t)(&ttbr
);
271 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
275 env
->cp15
.c2_base0_hi
= ttbr
>> 32;
276 env
->cp15
.c2_base0
= ttbr
;
278 /* TTBR1: cp15 crm=2 opc1=1 */
279 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| (15 << KVM_REG_ARM_COPROC_SHIFT
) |
280 (2 << KVM_REG_ARM_CRM_SHIFT
) | (1 << KVM_REG_ARM_OPC1_SHIFT
);
281 r
.addr
= (uintptr_t)(&ttbr
);
282 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
286 env
->cp15
.c2_base1_hi
= ttbr
>> 32;
287 env
->cp15
.c2_base1
= ttbr
;
289 /* Make sure the current mode regs are properly set */
290 mode
= env
->uncached_cpsr
& CPSR_M
;
291 bn
= bank_number(mode
);
292 if (mode
== ARM_CPU_MODE_FIQ
) {
293 memcpy(env
->regs
+ 8, env
->fiq_regs
, 5 * sizeof(uint32_t));
295 memcpy(env
->regs
+ 8, env
->usr_regs
, 5 * sizeof(uint32_t));
297 env
->regs
[13] = env
->banked_r13
[bn
];
298 env
->regs
[14] = env
->banked_r14
[bn
];
299 env
->spsr
= env
->banked_spsr
[bn
];
301 /* The main GET_ONE_REG loop above set c2_control, but we need to
302 * update some extra cached precomputed values too.
303 * When this is driven from the cp_regs hashtable then this ugliness
304 * can disappear because we'll use the access function which sets
305 * these values automatically.
307 env
->cp15
.c2_mask
= ~(0xffffffffu
>> env
->cp15
.c2_control
);
308 env
->cp15
.c2_base_mask
= ~(0x3fffu
>> env
->cp15
.c2_control
);
311 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U64
| KVM_REG_ARM_VFP
;
312 for (i
= 0; i
< 32; i
++) {
313 r
.addr
= (uintptr_t)(&env
->vfp
.regs
[i
]);
314 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
321 r
.id
= KVM_REG_ARM
| KVM_REG_SIZE_U32
| KVM_REG_ARM_VFP
|
322 KVM_REG_ARM_VFP_FPSCR
;
323 r
.addr
= (uintptr_t)&fpscr
;
324 ret
= kvm_vcpu_ioctl(cs
, KVM_GET_ONE_REG
, &r
);
328 vfp_set_fpscr(env
, fpscr
);
333 void kvm_arch_pre_run(CPUState
*cs
, struct kvm_run
*run
)
337 void kvm_arch_post_run(CPUState
*cs
, struct kvm_run
*run
)
341 int kvm_arch_handle_exit(CPUState
*cs
, struct kvm_run
*run
)
346 void kvm_arch_reset_vcpu(CPUState
*cs
)
350 bool kvm_arch_stop_on_emulation_error(CPUState
*cs
)
355 int kvm_arch_process_async_events(CPUState
*cs
)
360 int kvm_arch_on_sigbus_vcpu(CPUState
*cs
, int code
, void *addr
)
/* SIGBUS delivered outside vcpu context. Not handled on ARM;
 * non-zero tells the caller the signal was not consumed.
 */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
370 void kvm_arch_update_guest_debug(CPUState
*cs
, struct kvm_guest_debug
*dbg
)
372 qemu_log_mask(LOG_UNIMP
, "%s: not implemented\n", __func__
);
375 int kvm_arch_insert_sw_breakpoint(CPUState
*cs
,
376 struct kvm_sw_breakpoint
*bp
)
378 qemu_log_mask(LOG_UNIMP
, "%s: not implemented\n", __func__
);
382 int kvm_arch_insert_hw_breakpoint(target_ulong addr
,
383 target_ulong len
, int type
)
385 qemu_log_mask(LOG_UNIMP
, "%s: not implemented\n", __func__
);
389 int kvm_arch_remove_hw_breakpoint(target_ulong addr
,
390 target_ulong len
, int type
)
392 qemu_log_mask(LOG_UNIMP
, "%s: not implemented\n", __func__
);
396 int kvm_arch_remove_sw_breakpoint(CPUState
*cs
,
397 struct kvm_sw_breakpoint
*bp
)
399 qemu_log_mask(LOG_UNIMP
, "%s: not implemented\n", __func__
);
403 void kvm_arch_remove_all_hw_breakpoints(void)
405 qemu_log_mask(LOG_UNIMP
, "%s: not implemented\n", __func__
);