/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "qemu/log.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

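/*
 * Read a single 32-bit ID/system register from a (scratch) vCPU fd via
 * KVM_GET_ONE_REG. Returns the raw ioctl() result: 0 on success, -1 (with
 * errno set) on failure, which lets callers accumulate errors with |=.
 */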
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int err = 0, fdarray[3];
    uint32_t midr, id_pfr0;
    uint64_t features = 0;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcf->dtb_compatible = "arm,arm-v7";

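    /*
     * fdarray[2] is the scratch vCPU fd created by
     * kvm_arm_create_scratch_host_vcpu() above; any failing read returns
     * -1, so OR-ing the results together and checking err < 0 afterwards
     * catches a failure in any of them.
     */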
    err |= read_sys_reg32(fdarray[2], &midr, ARM_CP15_REG32(0, 0, 0, 0));
    err |= read_sys_reg32(fdarray[2], &id_pfr0, ARM_CP15_REG32(0, 0, 1, 0));

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                          ARM_CP15_REG32(0, 0, 2, 0));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                          ARM_CP15_REG32(0, 0, 2, 1));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                          ARM_CP15_REG32(0, 0, 2, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                          ARM_CP15_REG32(0, 0, 2, 3));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                          ARM_CP15_REG32(0, 0, 2, 4));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                          ARM_CP15_REG32(0, 0, 2, 5));
    if (read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                       ARM_CP15_REG32(0, 0, 2, 7))) {
        /*
         * Older kernels don't support reading ID_ISAR6. This register was
         * only introduced in ARMv8, so we can assume that it is zero on a
         * CPU that a kernel this old is running on.
         */
        ahcf->isar.id_isar6 = 0;
    }

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0);
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1);
    /*
     * FIXME: There is not yet a way to read MVFR2.
     * Fortunately there is not yet anything in there that affects migration.
     */

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, virtualization extensions, and the generic
     * timers; this in turn implies most of the other feature
     * bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7VE);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(ahcf->isar.mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(ahcf->isar.mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcf->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
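/*
 * Note: KVM_REG_ARM_TIMER_CNT above is the guest's virtual counter; it
 * advances continuously, so writing a snapshot back on every runtime sync
 * would perturb guest time, hence it is limited to KVM_PUT_FULL_STATE.
 */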

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

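/* cp15 encoding of MPIDR: opc1 == 0, CRn == c0, CRm == c0, opc2 == 5 */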
#define ARM_CPU_ID_MPIDR 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by QEMU.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    /* Check whether userspace can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }

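/*
 * For illustration, COREREG(usr_regs.uregs[0], regs[0]) expands to:
 *
 *   { KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
 *     KVM_REG_ARM_CORE_REG(usr_regs.uregs[0]),
 *     offsetof(CPUARMState, regs[0]) }
 *
 * i.e. the first argument is the kernel's struct kvm_regs field that
 * determines the KVM register ID, and the second names the CPUARMState
 * field it is copied to/from.
 */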
static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

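/*
 * Copy CPU state from QEMU to KVM: first fold the current mode's live
 * registers back into the banked copies, then write the core registers
 * listed in regs[] above, followed by the CPSR special case, the VFP
 * d-registers and FPSCR, vCPU events, and finally the coprocessor
 * register list and mp state.
 */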
int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_spsr[bn] = env->spsr;
    env->banked_r14[r14_bank_number(mode)] = env->regs[14];

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

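/*
 * Copy CPU state from KVM back to QEMU: the inverse of
 * kvm_arch_put_registers() above. Core registers come in via the regs[]
 * list plus the CPSR special case, the current mode's live registers are
 * refilled from the banked copies, and VFP state, vCPU events and the
 * coprocessor register list are synced back into QEMU's view.
 */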
int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->spsr = env->banked_spsr[bn];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

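/*
 * Guest debug (software and hardware breakpoints) and the PMU are not
 * implemented for KVM on 32-bit ARM; the stubs below just log the fact
 * via LOG_UNIMP and report failure or inactivity as appropriate.
 */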
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_pmu_init(CPUState *cs)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}