/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is the value of the counter register plus
	 * the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}

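/*
 * Note: PMCR_EL0.N holds the number of event counters implemented for this
 * vcpu, and the cycle counter (bit ARMV8_PMU_CYCLE_IDX, i.e. bit 31) is
 * always available. For example, with N == 6 the mask returned below is
 * GENMASK(5, 0) | BIT(31) == 0x8000003f.
 */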
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

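/*
 * Note: a counter contributes to the overflow interrupt status only when
 * all of the following hold: the PMU is globally enabled (PMCR_EL0.E), the
 * counter's overflow flag is set (PMOVSSET_EL0), the counter is enabled
 * (PMCNTENSET_EL0), its overflow interrupt is enabled (PMINTENSET_EL1),
 * and the counter is actually implemented for this vcpu.
 */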
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow = !!kvm_pmu_overflow_status(vcpu);

	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow,
					      &vcpu->arch.pmu);
		WARN_ON(ret);
	}
}

/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
	kvm_pmu_check_overflow(vcpu);
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;
	kvm_pmu_check_overflow(vcpu);
}

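/*
 * Note: the two helpers below handle the case where the irqchip is emulated
 * in userspace: kvm_pmu_update_run() mirrors the PMU interrupt line level
 * into the KVM_ARM_DEV_PMU bit of kvm_run->s.regs.device_irq_level, and
 * kvm_pmu_should_notify_user() tells the caller whether that level has
 * changed since userspace last saw it.
 */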
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device interrupt bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

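/*
 * Note: a struct kvm_pmc only knows its own index, so walk back from the
 * pmc to the start of the pmc[] array and then use container_of() to
 * recover the enclosing kvm_pmu, kvm_vcpu_arch and finally the kvm_vcpu.
 */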
static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_perf_overflow - handle a perf event overflow
 *
 * When the perf event overflows, call kvm_pmu_overflow_set to set the
 * overflow status of the corresponding counter.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
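	/*
	 * For each event counter selected in val that is enabled and
	 * programmed with the SW_INCR event, bump its 32-bit value and
	 * raise the overflow flag if it wraps to zero.
	 */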
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
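	/*
	 * PMCR_EL0 bits handled below: E enables or disables all counters,
	 * C resets the cycle counter, P resets the event counters, and LC
	 * turns the cycle counter into a full 64-bit counter.
	 */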
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
				       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data the guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    select_idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

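	/*
	 * Example: for a 32-bit counter (bitmask == 0xffffffff) currently at
	 * 0xfffffff0, the sample period computed below is 0x10, so the perf
	 * event overflows exactly when the emulated counter would wrap.
	 */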
	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/* The initial sample period (overflow count) of an event. */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and use an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

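/*
 * Illustrative sketch (not part of the original file): userspace typically
 * reaches the attribute handlers below through the vcpu device-attribute
 * ioctls, roughly as follows; vcpu_fd and the PPI number 23 are placeholder
 * assumptions.
 *
 *	int irq = 23;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (u64)(long)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// set the PMU IRQ first
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// then initialize the PMU
 */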
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}