// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"
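
/*
 * The "full-width" counter MSRs (MSR_IA32_PMC0, 0x4c1) alias the legacy
 * counter MSRs (MSR_IA32_PERFCTR0, 0xc1), differing only in this bit of
 * the MSR index.  Writes through the aliases update all counter bits;
 * legacy writes are sign-extended from bit 31.
 */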
#define MSR_PMC_FULL_WIDTH_BIT	(MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Mapping between fixed pmc index and intel_arch_events array: fixed
 * counter 0 counts instructions retired ([1]), counter 1 counts unhalted
 * core cycles ([0]), and counter 2 counts unhalted reference cycles ([7]).
 */
static int fixed_pmc_events[] = {1, 0, 7};
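
/*
 * Reprogram any fixed counter whose control field in the new
 * IA32_FIXED_CTR_CTRL value differs from the currently cached one.
 */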
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}
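
/*
 * Map a guest event_select/unit_mask pair to a perf generic event,
 * provided guest CPUID 0xA reports the corresponding architectural
 * event as available.
 */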
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}
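
/* Map a fixed counter index to the perf generic event it counts. */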
static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
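
/*
 * Translate a global counter index to a kvm_pmc: indices below
 * INTEL_PMC_IDX_FIXED are general-purpose counters, the rest are fixed.
 */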
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}
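
/*
 * Decode the ECX value of a guest RDPMC: bit 30 selects the fixed
 * counters, the low bits index into the selected counter array.
 */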
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}
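
/*
 * Full-width counter writes are exposed to the guest only if it has
 * PDCM in CPUID and the FW_WRITE bit (13) set in its emulated
 * IA32_PERF_CAPABILITIES.
 */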
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return false;

	return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
}
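
/* Resolve @msr as a full-width counter alias, if those are enabled. */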
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}
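
/*
 * The global control/status MSRs exist only for PMU version 2 and up;
 * anything else must resolve to a GP, fixed, or full-width counter MSR.
 */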
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
		break;
	}

	return ret;
}
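
/* Find the counter backing @msr: fixed, event selector, or GP in turn. */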
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}
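
/*
 * Counter reads are masked to the width enumerated in guest CPUID 0xA;
 * reads through the full-width aliases take the same path and mask.
 */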
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}
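
/*
 * Control registers are validated against the reserved-bit masks that
 * intel_pmu_refresh() computed from guest CPUID; guest counter writes
 * through the legacy MSRs are sign-extended from bit 31 unless the
 * full-width aliases are used.
 */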
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}
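
/*
 * Recompute the vCPU's PMU configuration from guest CPUID leaf 0xA,
 * clamped to what the host perf subsystem supports.
 */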
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	vcpu->arch.perf_capabilities = 0;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);
	if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}
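
/* One-time initialization of the constant fields of every counter. */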
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}
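
/* Stop all counters and clear all PMU state when the vCPU is reset. */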
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}
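
/* The Intel/VMX implementation of the arch-neutral kvm_pmu_ops. */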
struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};