/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

18 | #ifndef __ASM_ARM_KVM_PMU_H | |
19 | #define __ASM_ARM_KVM_PMU_H | |
20 | ||
21 | #ifdef CONFIG_KVM_ARM_PMU | |
22 | ||
23 | #include <linux/perf_event.h> | |
24 | #include <asm/perf_event.h> | |
25 | ||
051ff581 SZ |
26 | #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) |
27 | ||
04fe4726 SZ |
28 | struct kvm_pmc { |
29 | u8 idx; /* index into the pmu->pmc array */ | |
30 | struct perf_event *perf_event; | |
31 | u64 bitmask; | |
32 | }; | |
33 | ||
34 | struct kvm_pmu { | |
35 | int irq_num; | |
36 | struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; | |
37 | bool ready; | |
38 | }; | |
ab946834 SZ |
39 | |
40 | #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) | |
051ff581 SZ |
41 | u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); |
42 | void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); | |
96b0eebc SZ |
43 | u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); |
44 | void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); | |
45 | void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); | |
76d883c4 | 46 | void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); |
7a0adc70 | 47 | void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); |
7f766358 SZ |
48 | void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, |
49 | u64 select_idx); | |
04fe4726 SZ |
50 | #else |
51 | struct kvm_pmu { | |
52 | }; | |
ab946834 SZ |
53 | |
54 | #define kvm_arm_pmu_v3_ready(v) (false) | |
051ff581 SZ |
55 | static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, |
56 | u64 select_idx) | |
57 | { | |
58 | return 0; | |
59 | } | |
60 | static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, | |
61 | u64 select_idx, u64 val) {} | |
96b0eebc SZ |
62 | static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) |
63 | { | |
64 | return 0; | |
65 | } | |
66 | static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} | |
67 | static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} | |
76d883c4 | 68 | static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} |
7a0adc70 | 69 | static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} |
7f766358 SZ |
70 | static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, |
71 | u64 data, u64 select_idx) {} | |
04fe4726 SZ |
72 | #endif |
73 | ||
74 | #endif |