#include <linux/perf_event.h>

/*
 * Event indices; must line up with the "event=0xNN" strings and the
 * msr[] table below.
 */
enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,

	PERF_MSR_EVENT_MAX,
};

static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */

	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case 78: /* 14nm Skylake Mobile  */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

struct perf_msr {
	u64 msr;
	struct perf_pmu_events_attr *attr;
	bool (*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");

static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,                &evattr_tsc,    NULL,            },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,   &evattr_aperf,  test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,   &evattr_mperf,  test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,        &evattr_pperf,  test_intel,      },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,    &evattr_smi,    test_intel,      },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,    &evattr_ptsc,   test_ptsc,       },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,  &evattr_irperf, test_irperf,     },
};

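/*
 * Entries whose ->test() fails, or whose MSR cannot be read via
 * rdmsrl_safe(), have their ->attr cleared by msr_init() and are
 * therefore never listed in sysfs.
 */
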
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

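/*
 * Usage sketch (not from this file): once perf_pmu_register() below has
 * run, the events that survive probing show up under
 * /sys/bus/event_source/devices/msr/events/ and can be counted with the
 * perf tool, for example:
 *
 *	perf stat -e msr/tsc/,msr/aperf/,msr/mperf/ -a sleep 1
 *
 * The "event=0xNN" strings above are what the tool turns into
 * perf_event_attr::config, which msr_event_init() range-checks against
 * PERF_MSR_EVENT_MAX.
 */
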
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* Reject events whose MSR did not survive probing in msr_init(). */
	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}

static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered(); /* event_base == 0 is the TSC event */

	return now;
}

static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31); /* SMI count is a 32-bit MSR */

	local64_add(delta, &event->count);
}

static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,

	.attr_groups	= attr_groups,

	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,

	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,

	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);