// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

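/*
 * Illustrative sketch (added note, not part of the original file): the
 * full-width counter MSRs starting at MSR_IA32_PMC0 (0x4c1) alias the
 * legacy counters starting at MSR_IA32_PERFCTR0 (0xc1), so the two
 * ranges sit exactly 0x400 apart and a single bit of the MSR index
 * distinguishes them. The helper below assumes an in-range counter MSR
 * and only demonstrates the aliasing; the real code tests the same bit
 * via MSR_PMC_FULL_WIDTH_BIT.
 */
static inline bool msr_is_full_width_pmc(u32 msr)
{
	return msr & MSR_PMC_FULL_WIDTH_BIT;	/* e.g. 0x4c1 & 0x400 */
}
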
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

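/*
 * Added note on the table above (not in the original file): fixed
 * counter 0 counts instructions retired (intel_arch_events[1]), fixed
 * counter 1 counts unhalted core cycles (intel_arch_events[0]), and
 * fixed counter 2 counts unhalted reference cycles
 * (intel_arch_events[7]), matching the architectural fixed-function
 * counter assignment in the Intel SDM.
 */
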
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}
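
/*
 * Sketch of the MSR_CORE_PERF_FIXED_CTR_CTRL layout assumed by
 * fixed_ctrl_field() above (added for illustration, not in the
 * original file): each fixed counter owns a 4-bit control field, with
 * the enable ring levels in bits 0-1, AnyThread in bit 2 and PMI
 * enable in bit 3 of its nibble.
 */
static inline u8 fixed_ctrl_field_sketch(u64 data, int idx)
{
	return (data >> (idx * 4)) & 0xf;	/* nibble for counter idx */
}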

/* function is called when global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}
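
/*
 * Added note: the XOR above yields a set bit for every counter whose
 * enable bit changed in either direction. For example, going from
 * global_ctrl 0b0011 to 0b0110 gives diff 0b0101, so counters 0 and 2
 * are reprogrammed while counter 1, which stayed enabled, is left
 * alone.
 */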

static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

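/*
 * Added note: both this lookup and intel_rdpmc_ecx_to_pmc() below pass
 * the guest-controlled index through array_index_nospec() after the
 * bounds check, clamping it under speculation so a mispredicted branch
 * cannot leak out-of-bounds table contents (Spectre v1 hardening).
 */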
static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

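/*
 * Illustrative sketch (added, not in the original file): pmc->idx uses
 * the same numbering as IA32_PERF_GLOBAL_CTRL, with GP counters at
 * bits 0..n-1 and fixed counters starting at bit 32
 * (INTEL_PMC_IDX_FIXED), which is why a single test_bit() on
 * global_ctrl works for both kinds of counter.
 */
static inline int global_ctrl_bit_sketch(bool fixed, int idx)
{
	return fixed ? 32 + idx : idx;	/* INTEL_PMC_IDX_FIXED == 32 */
}
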
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

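/*
 * Sketch of the RDPMC ECX encoding handled above and below (added for
 * illustration, not in the original file): bit 30 selects the
 * fixed-counter range, the low bits select the counter within that
 * range, and both functions clear bits 30-31 before comparing the
 * remainder against the number of counters of the chosen type.
 */
static inline void rdpmc_ecx_decode_sketch(unsigned int ecx,
					   bool *fixed, unsigned int *idx)
{
	*fixed = ecx & (1u << 30);	/* type: fixed vs. general purpose */
	*idx = ecx & ~(3u << 30);	/* counter index within the type */
}
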
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

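/*
 * Added note: IA32_PERF_CAPABILITIES is only architecturally visible
 * when CPUID reports PDCM, hence the guest_cpuid_has() gate above.
 * PMU_CAP_FW_WRITES below corresponds to the full-width counter write
 * capability (bit 13 of IA32_PERF_CAPABILITIES); without it the guest
 * must not see the MSR_IA32_PMC0 alias range.
 */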
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

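/*
 * Illustrative sketch of the counter-write semantics above (added, not
 * in the original file): a guest write through the legacy
 * IA32_PERFCTRx range is sign-extended from bit 31, while a write
 * through the full-width IA32_PMCx range must already fit in the
 * counter width. The new sample period handed to perf is the distance
 * from the written value to counter overflow, which is what
 * get_sample_period() derives from the counter bitmask.
 */
static inline u64 legacy_pmc_write_sketch(u64 data)
{
	return (u64)(s64)(s32)data;	/* 0xffffffff -> 0xffffffffffffffff */
}
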
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

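/*
 * Added note on the available_event_types computation above: in
 * CPUID.0AH:EBX a *set* bit means the corresponding architectural
 * event is unavailable, so the mask is built from the inverted EBX,
 * truncated to mask_length bits. For example, EBX = 0x44 with a mask
 * length of 7 yields ~0x44 & 0x7f = 0x3b: events 2 and 6 (bus cycles
 * and branch misses) are filtered out of intel_find_arch_event().
 */
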
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};