#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>

#include "../perf_event.h"

static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses           */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts   */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled  */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches   */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches         */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.       */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r  */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
		[C(RESULT_MISS)]   = 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
		[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};
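
/*
 * For example, per the maps above: PERF_COUNT_HW_CACHE_MISSES resolves
 * to event 0x077e up to and including Family 16h, but to 0x0964 from
 * Family 17h onward.
 */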
static u64 amd_pmu_event_map(int hw_event)
{
	if (boot_cpu_data.x86 >= 0x17)
		return amd_f17h_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
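/*
 * Illustrative result, given the layouts above: counter index 2 uses
 * event select MSR 0xc0010002 on legacy CPUs, but MSR_F15H_PERF_CTL + 4
 * (0xc0010204) when PERFCTR_CORE is present.
 */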
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
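/*
 * The 12-bit event code is split across PERF_CTL: bits [7:0] carry the
 * low byte and bits [35:32] the upper nibble. E.g. (illustrative) a raw
 * config of ((1ULL << 32) | 0xd8) decodes to event code 0x1d8. Codes
 * whose low byte is 0xEx or 0xFx are NorthBridge events, per
 * amd_is_nb_event() below.
 */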
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints()
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
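	/*
	 * Illustrative walk, assuming four counters: if hwc->idx is 2 and
	 * slot 2 is free, the cmpxchg() below claims slot 2 directly;
	 * otherwise the first free slot in c->idxmsk is claimed, and that
	 * claim is moved if a preferred slot is found later in the scan.
	 */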
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}
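
/*
 * struct amd_nb (see ../perf_event.h) holds the per-node owners[] array
 * and one single-counter constraint per slot, shared by all cores that
 * are attached to the node.
 */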
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return -ENOMEM;

	return 0;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (!x86_pmu.amd_nb_constraints)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;
	u64 counter;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * forever...
	 */
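	/*
	 * A counter armed by x86_perf_event_set_period() holds a negative
	 * period, so its top bit (cntval_bits - 1, i.e. bit 47 here) is
	 * set; seeing that bit set below therefore means the NMI handler
	 * has already reloaded the counter.
	 */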
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		rdmsrl(x86_pmu_event_addr(idx), counter);
		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	x86_pmu_disable_all();

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
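/*
 * E.g. with perf_nmi_window = msecs_to_jiffies(100), as set up in
 * amd_core_pmu_init() below: an otherwise unhandled PMC NMI arriving
 * within roughly 100ms of the last handled counter overflow is claimed
 * (NMI_HANDLED) instead of being reported as an unknown NMI.
 */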
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int active, handled;

	/*
	 * Obtain the active count before calling x86_pmu_handle_irq() since
	 * it is possible that x86_pmu_handle_irq() may make a counter
	 * inactive (through x86_pmu_stop).
	 */
	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp,
			       jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
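
/*
 * These format attributes appear under
 * /sys/bus/event_source/devices/cpu/format/ and allow raw event
 * specifications such as (illustrative):
 *
 *   perf stat -e cpu/event=0x76,umask=0x00/ -a sleep 1
 */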

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
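
/*
 * The second EVENT_CONSTRAINT() argument is a counter bitmask:
 * 0x01 = PMC0, 0x07 = PMC[2:0], 0x08 = PMC3, 0x3F = PMC[5:0],
 * 0x38 = PMC[5:3], and the OVERLAP variant's 0x09 = PMC3 or PMC0.
 */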

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};

static int __init amd_core_pmu_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	/* Avoid calculating the value each time in the NMI handler */
	perf_nmi_window = msecs_to_jiffies(100);

	switch (boot_cpu_data.x86) {
	case 0x15:
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
		break;
	case 0x17:
		pr_cont("Fam17h ");
		/*
		 * In family 17h, there are no event constraints in the PMC hardware.
		 * We fall back to using the default amd_get_event_constraints.
		 */
		break;
	default:
		pr_err("core perfctr but no constraints; unknown hardware!\n");
		return -ENODEV;
	}

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	pr_cont("core perfctr, ");
	return 0;
}

__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	if (boot_cpu_data.x86 >= 0x17)
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
	else
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}
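
/*
 * perf_ctr_virt_mask is ANDed out of the event select when a counter is
 * programmed (see __x86_pmu_enable_event() in ../perf_event.h):
 * clearing it here lets the Host-only/Guest-only bits set by
 * amd_core_hw_config() reach the hardware while SVM is enabled.
 */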
void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);