/* NOTE(review): git-blame web-page residue removed; file is QEMU's
 * target-arm/kvm.c ("ARM KVM: save and load VFP registers from kernel").
 */
/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "hw/arm-misc.h"

25const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
26 KVM_CAP_LAST_INFO
27};
28
29int kvm_arch_init(KVMState *s)
30{
31 /* For ARM interrupt delivery is always asynchronous,
32 * whether we are using an in-kernel VGIC or not.
33 */
34 kvm_async_interrupts_allowed = true;
35 return 0;
36}
37
38unsigned long kvm_arch_vcpu_id(CPUState *cpu)
39{
40 return cpu->cpu_index;
41}
42
43int kvm_arch_init_vcpu(CPUState *cs)
44{
45 struct kvm_vcpu_init init;
81635574
PM
46 int ret;
47 uint64_t v;
48 struct kvm_one_reg r;
494b00c7
CD
49
50 init.target = KVM_ARM_TARGET_CORTEX_A15;
51 memset(init.features, 0, sizeof(init.features));
81635574
PM
52 ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
53 if (ret) {
54 return ret;
55 }
56 /* Query the kernel to make sure it supports 32 VFP
57 * registers: QEMU's "cortex-a15" CPU is always a
58 * VFP-D32 core. The simplest way to do this is just
59 * to attempt to read register d31.
60 */
61 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
62 r.addr = (uintptr_t)(&v);
63 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
64 if (ret == ENOENT) {
65 return EINVAL;
66 }
67 return ret;
494b00c7
CD
68}
69
/* One entry in the register sync table: a KVM ONE_REG id together with
 * the offset of the matching field inside CPUARMState.
 */
typedef struct Reg {
    uint64_t id;  /* id for KVM_GET_ONE_REG / KVM_SET_ONE_REG */
    int offset;   /* offsetof(CPUARMState, <field>) */
} Reg;
74
/* Build a Reg entry for a 32-bit core register: KERNELNAME names the
 * field inside the kernel's struct kvm_regs, QEMUFIELD the CPUARMState
 * field it is copied to/from.
 */
#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

/* Build a Reg entry for a 32-bit cp15 register identified by its
 * CRn/CRm/opc1/opc2 encoding.
 */
#define CP15REG(CRN, CRM, OPC1, OPC2, QEMUFIELD)  \
    {                                             \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |          \
        (15 << KVM_REG_ARM_COPROC_SHIFT) |        \
        ((CRN) << KVM_REG_ARM_32_CRN_SHIFT) |     \
        ((CRM) << KVM_REG_ARM_CRM_SHIFT) |        \
        ((OPC1) << KVM_REG_ARM_OPC1_SHIFT) |      \
        ((OPC2) << KVM_REG_ARM_32_OPC2_SHIFT),    \
        offsetof(CPUARMState, QEMUFIELD)          \
    }

/* Build a Reg entry for a VFP system register (FPSID, FPEXC, ...). */
#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

100static const Reg regs[] = {
101 /* R0_usr .. R14_usr */
102 COREREG(usr_regs.uregs[0], regs[0]),
103 COREREG(usr_regs.uregs[1], regs[1]),
104 COREREG(usr_regs.uregs[2], regs[2]),
105 COREREG(usr_regs.uregs[3], regs[3]),
106 COREREG(usr_regs.uregs[4], regs[4]),
107 COREREG(usr_regs.uregs[5], regs[5]),
108 COREREG(usr_regs.uregs[6], regs[6]),
109 COREREG(usr_regs.uregs[7], regs[7]),
110 COREREG(usr_regs.uregs[8], usr_regs[0]),
111 COREREG(usr_regs.uregs[9], usr_regs[1]),
112 COREREG(usr_regs.uregs[10], usr_regs[2]),
113 COREREG(usr_regs.uregs[11], usr_regs[3]),
114 COREREG(usr_regs.uregs[12], usr_regs[4]),
115 COREREG(usr_regs.uregs[13], banked_r13[0]),
116 COREREG(usr_regs.uregs[14], banked_r14[0]),
117 /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
118 COREREG(svc_regs[0], banked_r13[1]),
119 COREREG(svc_regs[1], banked_r14[1]),
120 COREREG(svc_regs[2], banked_spsr[1]),
121 COREREG(abt_regs[0], banked_r13[2]),
122 COREREG(abt_regs[1], banked_r14[2]),
123 COREREG(abt_regs[2], banked_spsr[2]),
124 COREREG(und_regs[0], banked_r13[3]),
125 COREREG(und_regs[1], banked_r14[3]),
126 COREREG(und_regs[2], banked_spsr[3]),
127 COREREG(irq_regs[0], banked_r13[4]),
128 COREREG(irq_regs[1], banked_r14[4]),
129 COREREG(irq_regs[2], banked_spsr[4]),
130 /* R8_fiq .. R14_fiq and SPSR_fiq */
131 COREREG(fiq_regs[0], fiq_regs[0]),
132 COREREG(fiq_regs[1], fiq_regs[1]),
133 COREREG(fiq_regs[2], fiq_regs[2]),
134 COREREG(fiq_regs[3], fiq_regs[3]),
135 COREREG(fiq_regs[4], fiq_regs[4]),
136 COREREG(fiq_regs[5], banked_r13[5]),
137 COREREG(fiq_regs[6], banked_r14[5]),
138 COREREG(fiq_regs[7], banked_spsr[5]),
139 /* R15 */
140 COREREG(usr_regs.uregs[15], regs[15]),
141 /* A non-comprehensive set of cp15 registers.
142 * TODO: drive this from the cp_regs hashtable instead.
143 */
144 CP15REG(1, 0, 0, 0, cp15.c1_sys), /* SCTLR */
145 CP15REG(2, 0, 0, 2, cp15.c2_control), /* TTBCR */
146 CP15REG(3, 0, 0, 0, cp15.c3), /* DACR */
81635574
PM
147 /* VFP system registers */
148 VFPSYSREG(FPSID),
149 VFPSYSREG(MVFR1),
150 VFPSYSREG(MVFR0),
151 VFPSYSREG(FPEXC),
152 VFPSYSREG(FPINST),
153 VFPSYSREG(FPINST2),
494b00c7
CD
154};
155
156int kvm_arch_put_registers(CPUState *cs, int level)
157{
158 ARMCPU *cpu = ARM_CPU(cs);
159 CPUARMState *env = &cpu->env;
160 struct kvm_one_reg r;
161 int mode, bn;
162 int ret, i;
81635574 163 uint32_t cpsr, fpscr;
494b00c7
CD
164 uint64_t ttbr;
165
166 /* Make sure the banked regs are properly set */
167 mode = env->uncached_cpsr & CPSR_M;
168 bn = bank_number(mode);
169 if (mode == ARM_CPU_MODE_FIQ) {
170 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
171 } else {
172 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
173 }
174 env->banked_r13[bn] = env->regs[13];
175 env->banked_r14[bn] = env->regs[14];
176 env->banked_spsr[bn] = env->spsr;
177
178 /* Now we can safely copy stuff down to the kernel */
179 for (i = 0; i < ARRAY_SIZE(regs); i++) {
180 r.id = regs[i].id;
181 r.addr = (uintptr_t)(env) + regs[i].offset;
182 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
183 if (ret) {
184 return ret;
185 }
186 }
187
188 /* Special cases which aren't a single CPUARMState field */
189 cpsr = cpsr_read(env);
190 r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
191 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
192 r.addr = (uintptr_t)(&cpsr);
193 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
194 if (ret) {
195 return ret;
196 }
197
198 /* TTBR0: cp15 crm=2 opc1=0 */
199 ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
200 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
201 (2 << KVM_REG_ARM_CRM_SHIFT) | (0 << KVM_REG_ARM_OPC1_SHIFT);
202 r.addr = (uintptr_t)(&ttbr);
203 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
204 if (ret) {
205 return ret;
206 }
207
208 /* TTBR1: cp15 crm=2 opc1=1 */
209 ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
210 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
211 (2 << KVM_REG_ARM_CRM_SHIFT) | (1 << KVM_REG_ARM_OPC1_SHIFT);
212 r.addr = (uintptr_t)(&ttbr);
213 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
81635574
PM
214 if (ret) {
215 return ret;
216 }
217
218 /* VFP registers */
219 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
220 for (i = 0; i < 32; i++) {
221 r.addr = (uintptr_t)(&env->vfp.regs[i]);
222 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
223 if (ret) {
224 return ret;
225 }
226 r.id++;
227 }
228
229 r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
230 KVM_REG_ARM_VFP_FPSCR;
231 fpscr = vfp_get_fpscr(env);
232 r.addr = (uintptr_t)&fpscr;
233 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
494b00c7
CD
234
235 return ret;
236}
237
238int kvm_arch_get_registers(CPUState *cs)
239{
240 ARMCPU *cpu = ARM_CPU(cs);
241 CPUARMState *env = &cpu->env;
242 struct kvm_one_reg r;
243 int mode, bn;
244 int ret, i;
81635574 245 uint32_t cpsr, fpscr;
494b00c7
CD
246 uint64_t ttbr;
247
248 for (i = 0; i < ARRAY_SIZE(regs); i++) {
249 r.id = regs[i].id;
250 r.addr = (uintptr_t)(env) + regs[i].offset;
251 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
252 if (ret) {
253 return ret;
254 }
255 }
256
257 /* Special cases which aren't a single CPUARMState field */
258 r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
259 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
260 r.addr = (uintptr_t)(&cpsr);
261 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
262 if (ret) {
263 return ret;
264 }
265 cpsr_write(env, cpsr, 0xffffffff);
266
267 /* TTBR0: cp15 crm=2 opc1=0 */
268 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
269 (2 << KVM_REG_ARM_CRM_SHIFT) | (0 << KVM_REG_ARM_OPC1_SHIFT);
270 r.addr = (uintptr_t)(&ttbr);
271 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
272 if (ret) {
273 return ret;
274 }
275 env->cp15.c2_base0_hi = ttbr >> 32;
276 env->cp15.c2_base0 = ttbr;
277
278 /* TTBR1: cp15 crm=2 opc1=1 */
279 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
280 (2 << KVM_REG_ARM_CRM_SHIFT) | (1 << KVM_REG_ARM_OPC1_SHIFT);
281 r.addr = (uintptr_t)(&ttbr);
282 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
283 if (ret) {
284 return ret;
285 }
286 env->cp15.c2_base1_hi = ttbr >> 32;
287 env->cp15.c2_base1 = ttbr;
288
289 /* Make sure the current mode regs are properly set */
290 mode = env->uncached_cpsr & CPSR_M;
291 bn = bank_number(mode);
292 if (mode == ARM_CPU_MODE_FIQ) {
293 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
294 } else {
295 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
296 }
297 env->regs[13] = env->banked_r13[bn];
298 env->regs[14] = env->banked_r14[bn];
299 env->spsr = env->banked_spsr[bn];
300
301 /* The main GET_ONE_REG loop above set c2_control, but we need to
302 * update some extra cached precomputed values too.
303 * When this is driven from the cp_regs hashtable then this ugliness
304 * can disappear because we'll use the access function which sets
305 * these values automatically.
306 */
307 env->cp15.c2_mask = ~(0xffffffffu >> env->cp15.c2_control);
308 env->cp15.c2_base_mask = ~(0x3fffu >> env->cp15.c2_control);
309
81635574
PM
310 /* VFP registers */
311 r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
312 for (i = 0; i < 32; i++) {
313 r.addr = (uintptr_t)(&env->vfp.regs[i]);
314 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
315 if (ret) {
316 return ret;
317 }
318 r.id++;
319 }
320
321 r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
322 KVM_REG_ARM_VFP_FPSCR;
323 r.addr = (uintptr_t)&fpscr;
324 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
325 if (ret) {
326 return ret;
327 }
328 vfp_set_fpscr(env, fpscr);
329
494b00c7
CD
330 return 0;
331}
332
333void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
334{
335}
336
337void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
338{
339}
340
341int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
342{
343 return 0;
344}
345
346void kvm_arch_reset_vcpu(CPUState *cs)
347{
348}
349
350bool kvm_arch_stop_on_emulation_error(CPUState *cs)
351{
352 return true;
353}
354
355int kvm_arch_process_async_events(CPUState *cs)
356{
357 return 0;
358}
359
360int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
361{
362 return 1;
363}
364
/* SIGBUS outside vcpu context is not handled on ARM; returning nonzero
 * leaves it to the generic code.
 */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
369
370void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
371{
372 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
373}
374
375int kvm_arch_insert_sw_breakpoint(CPUState *cs,
376 struct kvm_sw_breakpoint *bp)
377{
378 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
379 return -EINVAL;
380}
381
382int kvm_arch_insert_hw_breakpoint(target_ulong addr,
383 target_ulong len, int type)
384{
385 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
386 return -EINVAL;
387}
388
389int kvm_arch_remove_hw_breakpoint(target_ulong addr,
390 target_ulong len, int type)
391{
392 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
393 return -EINVAL;
394}
395
396int kvm_arch_remove_sw_breakpoint(CPUState *cs,
397 struct kvm_sw_breakpoint *bp)
398{
399 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
400 return -EINVAL;
401}
402
403void kvm_arch_remove_all_hw_breakpoints(void)
404{
405 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
406}