/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

/*
 * Forward declaration so this header is self-contained: every function
 * below only takes a pointer, so an incomplete type is sufficient.
 */
struct kvm_vcpu;

#ifdef CONFIG_KVM_ARM_PMU

#include <linux/perf_event.h>
#include <asm/perf_event.h>

/* The cycle counter occupies the highest counter index. */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

/* One virtual PMU counter, backed by a host perf event when active. */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
	u64 bitmask;
};

/* Per-vcpu PMU state. */
struct kvm_pmu {
	int irq_num;		/* interrupt number for counter overflow */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	bool ready;		/* PMU fully configured for this vcpu */
	bool irq_level;		/* current level of the overflow interrupt */
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
#else
/*
 * Stubs for !CONFIG_KVM_ARM_PMU: an empty state struct and no-op
 * inline functions so callers need no #ifdefs of their own.
 */
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu,
					      u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
#endif

#endif