#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>

#include "../perf_event.h"

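/*
 * Number of additional NMIs the handler may still claim after a handled
 * PMC overflow, to absorb late-arriving overflow NMIs; see the NMI
 * latency comment above amd_pmu_handle_irq().
 */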
static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);

static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
                [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
                [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 amd_hw_cache_event_ids_f17h
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
                [C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
                [C(RESULT_MISS)]   = 0,
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
                [C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
                [C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
                [C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
                [C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
};

static u64 amd_pmu_event_map(int hw_event)
{
        if (boot_cpu_data.x86 >= 0x17)
                return amd_f17h_perfmon_event_map[hw_event];

        return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
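/*
 * For example, the event-select MSR for counter 2 resolves to
 * 0xc0010000 + 2 = 0xc0010002 on legacy CPUs, and to
 * 0xc0010200 + (2 << 1) = 0xc0010204 with core extensions. The first
 * lookup computes and caches the offset in event_offsets[] or
 * count_offsets[]; subsequent calls return the cached value.
 */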
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
        int offset;

        if (!index)
                return index;

        if (eventsel)
                offset = event_offsets[index];
        else
                offset = count_offsets[index];

        if (offset)
                return offset;

        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                offset = index;
        else
                offset = index << 1;

        if (eventsel)
                event_offsets[index] = offset;
        else
                count_offsets[index] = offset;

        return offset;
}

static int amd_core_hw_config(struct perf_event *event)
{
        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as GO == HO == 0
                 * and will count in both modes. We don't want to count in that
                 * case so we emulate no-counting by setting US = OS = 0.
                 */
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
                event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
                event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

        return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
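/*
 * The 12-bit event code is split across the event-select register:
 * bits 7:0 hold code[7:0] and bits 35:32 hold code[11:8] (matching the
 * "config:0-7,32-35" format attribute below); amd_get_event_code()
 * stitches the two fields back together.
 */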
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

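/* NB events have an event code whose low byte falls in the 0xe0-0xff range */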
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret;

        /* pass precise event sampling to ibs: */
        if (event->attr.precise_ip && get_ibs_caps())
                return -ENOENT;

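        /* branch-stack (LBR-like) sampling is not supported by this PMU */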
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        ret = x86_pmu_hw_config(event);
        if (ret)
                return ret;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
                                           struct perf_event *event)
{
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (cmpxchg(nb->owners + i, event, NULL) == event)
                        break;
        }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code whose low
 * byte is in the 0xe0-0xff range (see amd_is_nb_event()).
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints()
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                               struct event_constraint *c)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old;
        int idx, new = -1;

        if (!c)
                c = &unconstrained;

        if (cpuc->is_fake)
                return c;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
                if (new == -1 || hwc->idx == idx)
                        /* assign free slot, prefer hwc->idx */
                        old = cmpxchg(nb->owners + idx, NULL, event);
                else if (nb->owners[idx] == event)
                        /* event already present */
                        old = event;
                else
                        continue;

                if (old && old != event)
                        continue;

                /* reassign to this slot */
                if (new != -1)
                        cmpxchg(nb->owners + new, event, NULL);
                new = idx;

                /* already present, reuse */
                if (old == event)
                        break;
        }

        if (new == -1)
                return &emptyconstraint;

        return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (!x86_pmu.amd_nb_constraints)
                return 0;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return -ENOMEM;

        return 0;
}

static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
        struct amd_nb *nb;
        int i, nb_id;

        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

        if (!x86_pmu.amd_nb_constraints)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

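        /*
         * If another online CPU already set up the shared NB structure for
         * this node, use it and queue our own allocation for freeing;
         * otherwise the structure from amd_pmu_cpu_prepare() becomes the
         * shared one.
         */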
        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        *onln = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (!x86_pmu.amd_nb_constraints)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT     50
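/* i.e. at most ~50us of udelay(1) spins per counter in the loop below */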

static void amd_pmu_wait_on_overflow(int idx)
{
        unsigned int i;
        u64 counter;

        /*
         * Wait for the counter to be reset if it has overflowed. This loop
         * should exit very, very quickly, but just in case, don't wait
         * forever...
         */
        for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
                rdmsrl(x86_pmu_event_addr(idx), counter);
                if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
                        break;

                /* Might be in IRQ context, so can't sleep */
                udelay(1);
        }
}

static void amd_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        x86_pmu_disable_all();

        /*
         * This shouldn't be called from NMI context, but add a safeguard here
         * to return, since if we're in NMI context we can't wait for an NMI
         * to reset an overflowed counter value.
         */
        if (in_nmi())
                return;

        /*
         * Check each counter for overflow and wait for it to be reset by the
         * NMI if it has overflowed. This relies on the fact that all active
         * counters are always enabled when this function is called and
         * ARCH_PERFMON_EVENTSEL_INT is always set.
         */
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                amd_pmu_wait_on_overflow(idx);
        }
}

static void amd_pmu_disable_event(struct perf_event *event)
{
        x86_pmu_disable_event(event);

        /*
         * This can be called from NMI context (via x86_pmu_stop). The counter
         * may have overflowed, but either way, we'll never see it get reset
         * by the NMI if we're already in the NMI. And the NMI latency support
         * below will take care of any pending NMI that might have been
         * generated by the overflow.
         */
        if (in_nmi())
                return;

        amd_pmu_wait_on_overflow(event->hw.idx);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by using the number of active PMCs to determine
 * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
 * any PMCs. The per-CPU perf_nmi_counter variable is set to the minimum of
 * the number of active PMCs and 2. The value of 2 is used in case an NMI does
 * not arrive at the LAPIC in time to be collapsed into an already pending NMI.
 */
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int active, handled;

        /*
         * Obtain the active count before calling x86_pmu_handle_irq() since
         * it is possible that x86_pmu_handle_irq() may make a counter
         * inactive (through x86_pmu_stop).
         */
        active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);

        /* Process any counter overflows */
        handled = x86_pmu_handle_irq(regs);

        /*
         * If a counter was handled, record the number of possible remaining
         * NMIs that can occur.
         */
        if (handled) {
                this_cpu_write(perf_nmi_counter,
                               min_t(unsigned int, 2, active));

                return handled;
        }

        if (!this_cpu_read(perf_nmi_counter))
                return NMI_DONE;

        this_cpu_dec(perf_nmi_counter);

        return NMI_HANDLED;
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
                return &unconstrained;

        return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
                __amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,  "config:0-7,32-35");
PMU_FORMAT_ATTR(umask,  "config:8-15"   );
PMU_FORMAT_ATTR(edge,   "config:18"     );
PMU_FORMAT_ATTR(inv,    "config:23"     );
PMU_FORMAT_ATTR(cmask,  "config:24-31"  );

static struct attribute *amd_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask.attr,
        NULL,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000        FP      PERF_CTL[5:3]
 * 0x010        FP      PERF_CTL[5:3]
 * 0x020        LS      PERF_CTL[5:0]
 * 0x030        LS      PERF_CTL[5:0]
 * 0x040        DC      PERF_CTL[5:0]
 * 0x050        DC      PERF_CTL[5:0]
 * 0x060        CU      PERF_CTL[2:0]
 * 0x070        CU      PERF_CTL[2:0]
 * 0x080        IC/DE   PERF_CTL[2:0]
 * 0x090        IC/DE   PERF_CTL[2:0]
 * 0x0A0        ---
 * 0x0B0        ---
 * 0x0C0        EX/LS   PERF_CTL[5:0]
 * 0x0D0        DE      PERF_CTL[2:0]
 * 0x0E0        NB      NB_PERF_CTL[3:0]
 * 0x0F0        NB      NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003        FP      PERF_CTL[3]
 * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B        FP      PERF_CTL[3]
 * 0x00D        FP      PERF_CTL[3]
 * 0x023        DE      PERF_CTL[2:0]
 * 0x02D        LS      PERF_CTL[3]
 * 0x02E        LS      PERF_CTL[3,0]
 * 0x031        LS      PERF_CTL[2:0] (**)
 * 0x043        CU      PERF_CTL[2:0]
 * 0x045        CU      PERF_CTL[2:0]
 * 0x046        CU      PERF_CTL[2:0]
 * 0x054        CU      PERF_CTL[2:0]
 * 0x055        CU      PERF_CTL[2:0]
 * 0x08F        IC      PERF_CTL[0]
 * 0x187        DE      PERF_CTL[0]
 * 0x188        DE      PERF_CTL[0]
 * 0x0DB        EX      PERF_CTL[5:0]
 * 0x0DC        LS      PERF_CTL[5:0]
 * 0x0DD        LS      PERF_CTL[5:0]
 * 0x0DE        LS      PERF_CTL[5:0]
 * 0x0DF        LS      PERF_CTL[5:0]
 * 0x1C0        EX      PERF_CTL[5:3]
 * 0x1D6        EX      PERF_CTL[5:0]
 * 0x1D8        EX      PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

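/*
 * Counter masks, bit n set = PERF_CTL[n] may host the event:
 * PMC0 -> counter 0, PMC20 -> counters 2:0, PMC3 -> counter 3,
 * PMC30 -> counters 3 and 0 (overlapping), PMC50 -> counters 5:0,
 * PMC53 -> counters 5:3.
 */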
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
                               struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                case 0x031:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                return &amd_f15_PMC20;
                        return &emptyconstraint;
                case 0x1C0:
                        return &amd_f15_PMC53;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* moved to uncore.c */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}

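/*
 * Report the full 12-bit event code (low byte plus the extended bits
 * shifted down from config[35:32]) when formatting an event for sysfs.
 */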
static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
                    (config & AMD64_EVENTSEL_EVENT) >> 24;

        return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = amd_pmu_handle_irq,
        .disable_all            = amd_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = amd_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .addr_offset            = amd_pmu_addr_offset,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .format_attrs           = amd_format_attr,
        .events_sysfs_show      = amd_event_sysfs_show,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,

        .amd_nb_constraints     = 1,
};

static int __init amd_core_pmu_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;

        switch (boot_cpu_data.x86) {
        case 0x15:
                pr_cont("Fam15h ");
                x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
                break;
        case 0x17:
                pr_cont("Fam17h ");
                /*
                 * In family 17h, there are no event constraints in the PMC hardware.
                 * We fall back to the default amd_get_event_constraints.
                 */
                break;
        case 0x18:
                pr_cont("Fam18h ");
                /* Using default amd_get_event_constraints. */
                break;
        default:
                pr_err("core perfctr but no constraints; unknown hardware!\n");
                return -ENODEV;
        }

        /*
         * If core performance counter extensions exist, we must use
         * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
         * amd_pmu_addr_offset().
         */
        x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
        x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
        x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
        /*
         * AMD Core perfctr has separate MSRs for the NB events, see
         * the amd/uncore.c driver.
         */
        x86_pmu.amd_nb_constraints = 0;

        pr_cont("core perfctr, ");
        return 0;
}

__init int amd_pmu_init(void)
{
        int ret;

        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        x86_pmu = amd_pmu;

        ret = amd_core_pmu_init();
        if (ret)
                return ret;

        if (num_possible_cpus() == 1) {
                /*
                 * No point in allocating data structures to serialize
                 * against other CPUs, when there is only the one CPU.
                 */
                x86_pmu.amd_nb_constraints = 0;
        }

        if (boot_cpu_data.x86 >= 0x17)
                memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
        else
                memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

        return 0;
}

void amd_pmu_enable_virt(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        cpuc->perf_ctr_virt_mask = 0;

        /* Reload all events */
        amd_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * We only mask out the Host-only bit so that host-only counting works
         * when SVM is disabled. If someone sets up a guest-only counter when
         * SVM is disabled the Guest-only bit still gets set and the counter
         * will not count anything.
         */
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

        /* Reload all events */
        amd_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);