#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC      */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback          */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches          */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
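
/*
 * Generic cache events resolve through the table above: e.g. a tool
 * request for L1-dcache-load-misses ends up programming raw event
 * code 0x0141 (Data Cache Misses) from the C(L1D)/C(OP_READ) entry.
 * An entry of -1 marks a combination the hardware cannot count
 * (the core rejects it with -EINVAL), while 0 marks a combination
 * for which no event is wired up (-ENOENT).
 */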
114 | ||
115 | /* | |
116 | * AMD Performance Monitor K7 and later. | |
117 | */ | |
118 | static const u64 amd_perfmon_event_map[] = | |
119 | { | |
91fc4cc0 IM |
120 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, |
121 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | |
122 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, | |
123 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, | |
124 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, | |
125 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, | |
126 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ | |
127 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ | |
f22f54f4 PZ |
128 | }; |
129 | ||
130 | static u64 amd_pmu_event_map(int hw_event) | |
131 | { | |
132 | return amd_perfmon_event_map[hw_event]; | |
133 | } | |
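
/*
 * Example: "perf stat -e cycles" maps PERF_COUNT_HW_CPU_CYCLES through
 * amd_pmu_event_map() to event select 0x0076 (CPU clocks not halted).
 */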
134 | ||
4c1fd17a JS |
135 | /* |
136 | * Previously calculated offsets | |
137 | */ | |
138 | static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; | |
139 | static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; | |
140 | ||
141 | /* | |
142 | * Legacy CPUs: | |
143 | * 4 counters starting at 0xc0010000 each offset by 1 | |
144 | * | |
145 | * CPUs with core performance counter extensions: | |
146 | * 6 counters starting at 0xc0010200 each offset by 2 | |
147 | */ | |
148 | static inline int amd_pmu_addr_offset(int index, bool eventsel) | |
149 | { | |
0cf5f432 | 150 | int offset; |
4c1fd17a JS |
151 | |
152 | if (!index) | |
153 | return index; | |
154 | ||
155 | if (eventsel) | |
156 | offset = event_offsets[index]; | |
157 | else | |
158 | offset = count_offsets[index]; | |
159 | ||
160 | if (offset) | |
161 | return offset; | |
162 | ||
0cf5f432 | 163 | if (!cpu_has_perfctr_core) |
4c1fd17a JS |
164 | offset = index; |
165 | else | |
166 | offset = index << 1; | |
167 | ||
168 | if (eventsel) | |
169 | event_offsets[index] = offset; | |
170 | else | |
171 | count_offsets[index] = offset; | |
172 | ||
173 | return offset; | |
174 | } | |
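
/*
 * Worked example, using the MSR bases named above (MSR_K7_EVNTSEL0 =
 * 0xc0010000, MSR_F15H_PERF_CTL = 0xc0010200): for counter index 3, a
 * legacy CPU gets offset 3, i.e. event select MSR 0xc0010003; with the
 * core extensions the offset is 3 << 1 = 6, i.e. event select MSR
 * 0xc0010206, because control and counter MSRs interleave there.
 */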
175 | ||
e259514e JS |
176 | static int amd_core_hw_config(struct perf_event *event) |
177 | { | |
011af857 JR |
178 | if (event->attr.exclude_host && event->attr.exclude_guest) |
179 | /* | |
180 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 | |
181 | * and will count in both modes. We don't want to count in that | |
182 | * case so we emulate no-counting by setting US = OS = 0. | |
183 | */ | |
184 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | | |
185 | ARCH_PERFMON_EVENTSEL_OS); | |
186 | else if (event->attr.exclude_host) | |
9f19010a | 187 | event->hw.config |= AMD64_EVENTSEL_GUESTONLY; |
011af857 | 188 | else if (event->attr.exclude_guest) |
9f19010a | 189 | event->hw.config |= AMD64_EVENTSEL_HOSTONLY; |
011af857 | 190 | |
e259514e JS |
191 | return 0; |
192 | } | |
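
/*
 * HO and GO above are the Host-Only and Guest-Only bits of the event
 * select register (per the AMD BKDG, bits 41 and 40 respectively);
 * setting one restricts counting to host mode or SVM-guest mode.
 */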

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	/*
	 * The 12-bit event code is split across the config: bits [35:32]
	 * hold the high nibble, bits [7:0] the low byte.
	 */
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	/* NorthBridge events occupy event codes with low byte 0xE0-0xFF */
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}
254 | ||
255 | /* | |
256 | * AMD64 NorthBridge events need special treatment because | |
257 | * counter access needs to be synchronized across all cores | |
258 | * of a package. Refer to BKDG section 3.12 | |
259 | * | |
260 | * NB events are events measuring L3 cache, Hypertransport | |
261 | * traffic. They are identified by an event code >= 0xe00. | |
262 | * They measure events on the NorthBride which is shared | |
263 | * by all cores on a package. NB events are counted on a | |
264 | * shared set of counters. When a NB event is programmed | |
265 | * in a counter, the data actually comes from a shared | |
266 | * counter. Thus, access to those counters needs to be | |
267 | * synchronized. | |
268 | * | |
269 | * We implement the synchronization such that no two cores | |
270 | * can be measuring NB events using the same counters. Thus, | |
271 | * we maintain a per-NB allocation table. The available slot | |
272 | * is propagated using the event_constraint structure. | |
273 | * | |
274 | * We provide only one choice for each NB event based on | |
275 | * the fact that only NB events have restrictions. Consequently, | |
276 | * if a counter is available, there is a guarantee the NB event | |
277 | * will be assigned to it. If no slot is available, an empty | |
278 | * constraint is returned and scheduling will eventually fail | |
279 | * for this event. | |
280 | * | |
281 | * Note that all cores attached the same NB compete for the same | |
282 | * counters to host NB events, this is why we use atomic ops. Some | |
283 | * multi-chip CPUs may have more than one NB. | |
284 | * | |
285 | * Given that resources are allocated (cmpxchg), they must be | |
286 | * eventually freed for others to use. This is accomplished by | |
4dd4c2ae | 287 | * calling __amd_put_nb_event_constraints() |
f22f54f4 PZ |
288 | * |
289 | * Non NB events are not impacted by this restriction. | |
290 | */ | |
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}
344 | ||
c079c791 | 345 | static struct amd_nb *amd_alloc_nb(int cpu) |
f22f54f4 PZ |
346 | { |
347 | struct amd_nb *nb; | |
348 | int i; | |
349 | ||
034c6efa PZ |
350 | nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, |
351 | cpu_to_node(cpu)); | |
f22f54f4 PZ |
352 | if (!nb) |
353 | return NULL; | |
354 | ||
c079c791 | 355 | nb->nb_id = -1; |
f22f54f4 PZ |
356 | |
357 | /* | |
358 | * initialize all possible NB constraints | |
359 | */ | |
948b1bb8 | 360 | for (i = 0; i < x86_pmu.num_counters; i++) { |
34538ee7 | 361 | __set_bit(i, nb->event_constraints[i].idxmsk); |
f22f54f4 PZ |
362 | nb->event_constraints[i].weight = 1; |
363 | } | |
364 | return nb; | |
365 | } | |
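
/*
 * Each nb->event_constraints[i] permits exactly one counter (bit i set,
 * weight 1), so handing out constraint i pins an NB event to shared
 * slot i, whose ownership the owners[] cmpxchg protocol arbitrates.
 */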
366 | ||
b38b24ea PZ |
367 | static int amd_pmu_cpu_prepare(int cpu) |
368 | { | |
369 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | |
370 | ||
371 | WARN_ON_ONCE(cpuc->amd_nb); | |
372 | ||
373 | if (boot_cpu_data.x86_max_cores < 2) | |
374 | return NOTIFY_OK; | |
375 | ||
c079c791 | 376 | cpuc->amd_nb = amd_alloc_nb(cpu); |
b38b24ea PZ |
377 | if (!cpuc->amd_nb) |
378 | return NOTIFY_BAD; | |
379 | ||
380 | return NOTIFY_OK; | |
381 | } | |
382 | ||
383 | static void amd_pmu_cpu_starting(int cpu) | |
f22f54f4 | 384 | { |
b38b24ea PZ |
385 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
386 | struct amd_nb *nb; | |
f22f54f4 PZ |
387 | int i, nb_id; |
388 | ||
9f19010a | 389 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
1018faa6 | 390 | |
b1dc3c48 | 391 | if (boot_cpu_data.x86_max_cores < 2) |
f22f54f4 PZ |
392 | return; |
393 | ||
f22f54f4 | 394 | nb_id = amd_get_nb_id(cpu); |
b38b24ea | 395 | WARN_ON_ONCE(nb_id == BAD_APICID); |
f22f54f4 | 396 | |
f22f54f4 | 397 | for_each_online_cpu(i) { |
b38b24ea PZ |
398 | nb = per_cpu(cpu_hw_events, i).amd_nb; |
399 | if (WARN_ON_ONCE(!nb)) | |
f22f54f4 | 400 | continue; |
f22f54f4 | 401 | |
b38b24ea | 402 | if (nb->nb_id == nb_id) { |
7fdba1ca | 403 | cpuc->kfree_on_online = cpuc->amd_nb; |
b38b24ea PZ |
404 | cpuc->amd_nb = nb; |
405 | break; | |
406 | } | |
f22f54f4 | 407 | } |
b38b24ea PZ |
408 | |
409 | cpuc->amd_nb->nb_id = nb_id; | |
410 | cpuc->amd_nb->refcnt++; | |
f22f54f4 PZ |
411 | } |
412 | ||
b38b24ea | 413 | static void amd_pmu_cpu_dead(int cpu) |
f22f54f4 PZ |
414 | { |
415 | struct cpu_hw_events *cpuhw; | |
416 | ||
417 | if (boot_cpu_data.x86_max_cores < 2) | |
418 | return; | |
419 | ||
420 | cpuhw = &per_cpu(cpu_hw_events, cpu); | |
421 | ||
a90110c6 | 422 | if (cpuhw->amd_nb) { |
b38b24ea PZ |
423 | struct amd_nb *nb = cpuhw->amd_nb; |
424 | ||
425 | if (nb->nb_id == -1 || --nb->refcnt == 0) | |
426 | kfree(nb); | |
f22f54f4 | 427 | |
a90110c6 RW |
428 | cpuhw->amd_nb = NULL; |
429 | } | |
f22f54f4 PZ |
430 | } |

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
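
/*
 * These format strings are exported under
 * /sys/bus/event_source/devices/cpu/format/ and let tools compose raw
 * events symbolically, e.g. "perf stat -e cpu/event=0x76,umask=0x00/"
 * instead of hand-encoding the config value.
 */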
465 | ||
4979d272 RR |
466 | /* AMD Family 15h */ |
467 | ||
468 | #define AMD_EVENT_TYPE_MASK 0x000000F0ULL | |
469 | ||
470 | #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL | |
471 | #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL | |
472 | #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL | |
473 | #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL | |
474 | #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL | |
475 | #define AMD_EVENT_EX_LS 0x000000C0ULL | |
476 | #define AMD_EVENT_DE 0x000000D0ULL | |
477 | #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL | |
478 | ||
479 | /* | |
480 | * AMD family 15h event code/PMC mappings: | |
481 | * | |
482 | * type = event_code & 0x0F0: | |
483 | * | |
484 | * 0x000 FP PERF_CTL[5:3] | |
485 | * 0x010 FP PERF_CTL[5:3] | |
486 | * 0x020 LS PERF_CTL[5:0] | |
487 | * 0x030 LS PERF_CTL[5:0] | |
488 | * 0x040 DC PERF_CTL[5:0] | |
489 | * 0x050 DC PERF_CTL[5:0] | |
490 | * 0x060 CU PERF_CTL[2:0] | |
491 | * 0x070 CU PERF_CTL[2:0] | |
492 | * 0x080 IC/DE PERF_CTL[2:0] | |
493 | * 0x090 IC/DE PERF_CTL[2:0] | |
494 | * 0x0A0 --- | |
495 | * 0x0B0 --- | |
496 | * 0x0C0 EX/LS PERF_CTL[5:0] | |
497 | * 0x0D0 DE PERF_CTL[2:0] | |
498 | * 0x0E0 NB NB_PERF_CTL[3:0] | |
499 | * 0x0F0 NB NB_PERF_CTL[3:0] | |
500 | * | |
501 | * Exceptions: | |
502 | * | |
855357a2 | 503 | * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*) |
4979d272 | 504 | * 0x003 FP PERF_CTL[3] |
855357a2 | 505 | * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*) |
4979d272 RR |
506 | * 0x00B FP PERF_CTL[3] |
507 | * 0x00D FP PERF_CTL[3] | |
508 | * 0x023 DE PERF_CTL[2:0] | |
509 | * 0x02D LS PERF_CTL[3] | |
510 | * 0x02E LS PERF_CTL[3,0] | |
5bcdf5e4 | 511 | * 0x031 LS PERF_CTL[2:0] (**) |
4979d272 RR |
512 | * 0x043 CU PERF_CTL[2:0] |
513 | * 0x045 CU PERF_CTL[2:0] | |
514 | * 0x046 CU PERF_CTL[2:0] | |
515 | * 0x054 CU PERF_CTL[2:0] | |
516 | * 0x055 CU PERF_CTL[2:0] | |
517 | * 0x08F IC PERF_CTL[0] | |
518 | * 0x187 DE PERF_CTL[0] | |
519 | * 0x188 DE PERF_CTL[0] | |
520 | * 0x0DB EX PERF_CTL[5:0] | |
521 | * 0x0DC LS PERF_CTL[5:0] | |
522 | * 0x0DD LS PERF_CTL[5:0] | |
523 | * 0x0DE LS PERF_CTL[5:0] | |
524 | * 0x0DF LS PERF_CTL[5:0] | |
5bcdf5e4 | 525 | * 0x1C0 EX PERF_CTL[5:3] |
4979d272 RR |
526 | * 0x1D6 EX PERF_CTL[5:0] |
527 | * 0x1D8 EX PERF_CTL[5:0] | |
855357a2 | 528 | * |
5bcdf5e4 RR |
529 | * (*) depending on the umask all FPU counters may be used |
530 | * (**) only one unitmask enabled at a time | |
4979d272 RR |
531 | */ |
532 | ||
533 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); | |
534 | static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); | |
535 | static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); | |
bc1738f6 | 536 | static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); |
4979d272 RR |
537 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); |
538 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | |
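
/*
 * The masks read as bitmaps over PERF_CTL[5:0]: PMC0 = counter 0 only
 * (0x01), PMC20 = counters 2..0 (0x07), PMC3 = counter 3 (0x08),
 * PMC30 = counters 3 and 0 (0x09), PMC50 = all six (0x3F), PMC53 =
 * counters 5..3 (0x38). PMC30 needs EVENT_CONSTRAINT_OVERLAP because
 * its mask overlaps other masks without being a subset of them, a case
 * the scheduler must redo assignments for to avoid spurious failures.
 */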
539 | ||
540 | static struct event_constraint * | |
541 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) | |
542 | { | |
855357a2 RR |
543 | struct hw_perf_event *hwc = &event->hw; |
544 | unsigned int event_code = amd_get_event_code(hwc); | |
4979d272 RR |
545 | |
546 | switch (event_code & AMD_EVENT_TYPE_MASK) { | |
547 | case AMD_EVENT_FP: | |
548 | switch (event_code) { | |
855357a2 RR |
549 | case 0x000: |
550 | if (!(hwc->config & 0x0000F000ULL)) | |
551 | break; | |
552 | if (!(hwc->config & 0x00000F00ULL)) | |
553 | break; | |
554 | return &amd_f15_PMC3; | |
555 | case 0x004: | |
556 | if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) | |
557 | break; | |
558 | return &amd_f15_PMC3; | |
4979d272 RR |
559 | case 0x003: |
560 | case 0x00B: | |
561 | case 0x00D: | |
562 | return &amd_f15_PMC3; | |
4979d272 | 563 | } |
855357a2 | 564 | return &amd_f15_PMC53; |
4979d272 RR |
565 | case AMD_EVENT_LS: |
566 | case AMD_EVENT_DC: | |
567 | case AMD_EVENT_EX_LS: | |
568 | switch (event_code) { | |
569 | case 0x023: | |
570 | case 0x043: | |
571 | case 0x045: | |
572 | case 0x046: | |
573 | case 0x054: | |
574 | case 0x055: | |
575 | return &amd_f15_PMC20; | |
576 | case 0x02D: | |
577 | return &amd_f15_PMC3; | |
578 | case 0x02E: | |
579 | return &amd_f15_PMC30; | |
5bcdf5e4 RR |
580 | case 0x031: |
581 | if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) | |
582 | return &amd_f15_PMC20; | |
583 | return &emptyconstraint; | |
584 | case 0x1C0: | |
585 | return &amd_f15_PMC53; | |
4979d272 RR |
586 | default: |
587 | return &amd_f15_PMC50; | |
588 | } | |
589 | case AMD_EVENT_CU: | |
590 | case AMD_EVENT_IC_DE: | |
591 | case AMD_EVENT_DE: | |
592 | switch (event_code) { | |
593 | case 0x08F: | |
594 | case 0x187: | |
595 | case 0x188: | |
596 | return &amd_f15_PMC0; | |
597 | case 0x0DB ... 0x0DF: | |
598 | case 0x1D6: | |
599 | case 0x1D8: | |
600 | return &amd_f15_PMC50; | |
601 | default: | |
602 | return &amd_f15_PMC20; | |
603 | } | |
604 | case AMD_EVENT_NB: | |
0cf5f432 JS |
605 | /* moved to perf_event_amd_uncore.c */ |
606 | return &emptyconstraint; | |
4979d272 RR |
607 | default: |
608 | return &emptyconstraint; | |
609 | } | |
610 | } | |
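
/*
 * Example: event code 0x02E (per the mapping table above, LS
 * PERF_CTL[3,0]) returns amd_f15_PMC30, so the scheduler may place it
 * only on counter 0 or 3; an unlisted LS/DC/EX_LS code falls through
 * to amd_f15_PMC50 and may use any of the six core counters.
 */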
611 | ||
0bf79d44 JO |
612 | static ssize_t amd_event_sysfs_show(char *page, u64 config) |
613 | { | |
614 | u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | | |
615 | (config & AMD64_EVENTSEL_EVENT) >> 24; | |
616 | ||
617 | return x86_event_sysfs_show(page, config, event); | |
618 | } | |
619 | ||
b1dc3c48 RR |
620 | static __initconst const struct x86_pmu amd_pmu = { |
621 | .name = "AMD", | |
4979d272 RR |
622 | .handle_irq = x86_pmu_handle_irq, |
623 | .disable_all = x86_pmu_disable_all, | |
624 | .enable_all = x86_pmu_enable_all, | |
625 | .enable = x86_pmu_enable_event, | |
626 | .disable = x86_pmu_disable_event, | |
627 | .hw_config = amd_pmu_hw_config, | |
628 | .schedule_events = x86_schedule_events, | |
b1dc3c48 RR |
629 | .eventsel = MSR_K7_EVNTSEL0, |
630 | .perfctr = MSR_K7_PERFCTR0, | |
4c1fd17a | 631 | .addr_offset = amd_pmu_addr_offset, |
4979d272 RR |
632 | .event_map = amd_pmu_event_map, |
633 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | |
b1dc3c48 | 634 | .num_counters = AMD64_NUM_COUNTERS, |
4979d272 RR |
635 | .cntval_bits = 48, |
636 | .cntval_mask = (1ULL << 48) - 1, | |
637 | .apic = 1, | |
638 | /* use highest bit to detect overflow */ | |
639 | .max_period = (1ULL << 47) - 1, | |
b1dc3c48 | 640 | .get_event_constraints = amd_get_event_constraints, |
4979d272 RR |
641 | .put_event_constraints = amd_put_event_constraints, |
642 | ||
b1dc3c48 | 643 | .format_attrs = amd_format_attr, |
0bf79d44 | 644 | .events_sysfs_show = amd_event_sysfs_show, |
b1dc3c48 | 645 | |
4979d272 | 646 | .cpu_prepare = amd_pmu_cpu_prepare, |
1018faa6 | 647 | .cpu_starting = amd_pmu_cpu_starting, |
b1dc3c48 | 648 | .cpu_dead = amd_pmu_cpu_dead, |
4979d272 RR |
649 | }; |
650 | ||
b1dc3c48 RR |
651 | static int setup_event_constraints(void) |
652 | { | |
e259514e | 653 | if (boot_cpu_data.x86 == 0x15) |
b1dc3c48 RR |
654 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; |
655 | return 0; | |
656 | } | |
657 | ||
658 | static int setup_perfctr_core(void) | |
659 | { | |
660 | if (!cpu_has_perfctr_core) { | |
661 | WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h, | |
662 | KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!"); | |
663 | return -ENODEV; | |
664 | } | |
665 | ||
666 | WARN(x86_pmu.get_event_constraints == amd_get_event_constraints, | |
667 | KERN_ERR "hw perf events core counters need constraints handler!"); | |
668 | ||
669 | /* | |
670 | * If core performance counter extensions exists, we must use | |
671 | * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also | |
672 | * x86_pmu_addr_offset(). | |
673 | */ | |
674 | x86_pmu.eventsel = MSR_F15H_PERF_CTL; | |
675 | x86_pmu.perfctr = MSR_F15H_PERF_CTR; | |
676 | x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; | |
677 | ||
678 | printk(KERN_INFO "perf: AMD core performance counters detected\n"); | |
679 | ||
680 | return 0; | |
681 | } | |
682 | ||
de0428a7 | 683 | __init int amd_pmu_init(void) |
f22f54f4 PZ |
684 | { |
685 | /* Performance-monitoring supported from K7 and later: */ | |
686 | if (boot_cpu_data.x86 < 6) | |
687 | return -ENODEV; | |
688 | ||
b1dc3c48 RR |
689 | x86_pmu = amd_pmu; |
690 | ||
691 | setup_event_constraints(); | |
692 | setup_perfctr_core(); | |
f22f54f4 PZ |
693 | |
694 | /* Events are common for all AMDs */ | |
695 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | |
696 | sizeof(hw_cache_event_ids)); | |
697 | ||
f22f54f4 PZ |
698 | return 0; |
699 | } | |
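
/*
 * amd_pmu_init() is reached from the common x86 init path (in kernels
 * of this vintage, init_hw_perf_events() selects it for
 * X86_VENDOR_AMD), so all AMD-specific callbacks are installed before
 * userspace can see the "cpu" PMU.
 */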

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
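
/*
 * These exports are consumed by the KVM SVM code, which (in kernels of
 * this vintage) calls amd_pmu_enable_virt() when SVM is enabled on a
 * CPU and amd_pmu_disable_virt() when it is disabled, toggling the
 * Host-Only masking accordingly.
 */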