]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - virt/kvm/arm/pmu.c
arm64: KVM: Add access handler for PMOVSSET and PMOVSCLR register
[mirror_ubuntu-bionic-kernel.git] / virt / kvm / arm / pmu.c
CommitLineData
051ff581
SZ
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
17
18#include <linux/cpu.h>
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
21#include <linux/perf_event.h>
22#include <asm/kvm_emulate.h>
23#include <kvm/arm_pmu.h>
24
/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index (ARMV8_PMU_CYCLE_IDX selects PMCCNTR_EL0,
 *              anything else selects PMEVCNTR<n>_EL0)
 *
 * Return: the current counter value as the guest should see it, truncated
 * to the counter's width via pmc->bitmask.
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	/* Map the counter index onto the shadow sysreg that backs it. */
	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of counter register plus
	 * the value perf event counts: the shadow register only holds the
	 * snapshot taken when the backing perf event was (re)programmed.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}
49
50/**
51 * kvm_pmu_set_counter_value - set PMU counter value
52 * @vcpu: The vcpu pointer
53 * @select_idx: The counter index
54 * @val: The counter value
55 */
56void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
57{
58 u64 reg;
59
60 reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
61 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
62 vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
63}
96b0eebc 64
7f766358
SZ
65/**
66 * kvm_pmu_stop_counter - stop PMU counter
67 * @pmc: The PMU counter pointer
68 *
69 * If this counter has been configured to monitor some event, release it here.
70 */
71static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
72{
73 u64 counter, reg;
74
75 if (pmc->perf_event) {
76 counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
77 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
78 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
79 vcpu_sys_reg(vcpu, reg) = counter;
80 perf_event_disable(pmc->perf_event);
81 perf_event_release_kernel(pmc->perf_event);
82 pmc->perf_event = NULL;
83 }
84}
85
96b0eebc
SZ
86u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
87{
88 u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
89
90 val &= ARMV8_PMU_PMCR_N_MASK;
91 if (val == 0)
92 return BIT(ARMV8_PMU_CYCLE_IDX);
93 else
94 return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
95}
96
97/**
98 * kvm_pmu_enable_counter - enable selected PMU counter
99 * @vcpu: The vcpu pointer
100 * @val: the value guest writes to PMCNTENSET register
101 *
102 * Call perf_event_enable to start counting the perf event
103 */
104void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
105{
106 int i;
107 struct kvm_pmu *pmu = &vcpu->arch.pmu;
108 struct kvm_pmc *pmc;
109
110 if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
111 return;
112
113 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
114 if (!(val & BIT(i)))
115 continue;
116
117 pmc = &pmu->pmc[i];
118 if (pmc->perf_event) {
119 perf_event_enable(pmc->perf_event);
120 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
121 kvm_debug("fail to enable perf event\n");
122 }
123 }
124}
125
126/**
127 * kvm_pmu_disable_counter - disable selected PMU counter
128 * @vcpu: The vcpu pointer
129 * @val: the value guest writes to PMCNTENCLR register
130 *
131 * Call perf_event_disable to stop counting the perf event
132 */
133void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
134{
135 int i;
136 struct kvm_pmu *pmu = &vcpu->arch.pmu;
137 struct kvm_pmc *pmc;
138
139 if (!val)
140 return;
141
142 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
143 if (!(val & BIT(i)))
144 continue;
145
146 pmc = &pmu->pmc[i];
147 if (pmc->perf_event)
148 perf_event_disable(pmc->perf_event);
149 }
150}
7f766358 151
76d883c4
SZ
152static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
153{
154 u64 reg = 0;
155
156 if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
157 reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
158 reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
159 reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
160 reg &= kvm_pmu_valid_counter_mask(vcpu);
161
162 return reg;
163}
164
165/**
166 * kvm_pmu_overflow_set - set PMU overflow interrupt
167 * @vcpu: The vcpu pointer
168 * @val: the value guest writes to PMOVSSET register
169 */
170void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
171{
172 u64 reg;
173
174 if (val == 0)
175 return;
176
177 vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
178 reg = kvm_pmu_overflow_status(vcpu);
179 if (reg != 0)
180 kvm_vcpu_kick(vcpu);
181}
182
7f766358
SZ
183static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
184{
185 return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
186 (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
187}
188
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 *
 * Any previously programmed event on this counter is stopped first (which
 * snapshots its value into the shadow sysreg), then a fresh pinned perf
 * event is created with a sample period chosen so that the host event
 * overflows exactly when the guest counter would wrap.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	/* Release any event already attached to this counter. */
	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	/* Start disabled unless the counter is currently enabled by the guest. */
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/* The initial sample period (overflow count) of an event. */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}