/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE  = -1, /* not used */

        EXTRA_REG_RSP_0 = 0,  /* offcore_response_0 */
        EXTRA_REG_RSP_1 = 1,  /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,  /* lbr_select */
        EXTRA_REG_LDLAT = 3,  /* ld_lat_threshold */
        EXTRA_REG_FE    = 4,  /* fe_* */

        EXTRA_REG_MAX         /* number of entries needed */
};
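
/*
 * Note (annotation): these values index the per-core er_account array,
 * i.e. the regs[] member of struct intel_shared_regs below; e.g. an
 * offcore_response_0 event parks its extra MSR state in
 * regs[EXTRA_REG_RSP_0].
 */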

struct event_constraint {
        union {
                unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64           idxmsk64;
        };
        u64 code;
        u64 cmask;
        int weight;
        int overlap;
        int flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT       0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST          0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW      0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED        0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW      0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW      0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL             0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC          0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED    0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT        0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD      0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING      0x0800 /* use freerunning PEBS */


struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 */
#define PEBS_FREERUNNING_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
        u64 bts_buffer_base;
        u64 bts_index;
        u64 bts_absolute_maximum;
        u64 bts_interrupt_threshold;
        u64 pebs_buffer_base;
        u64 pebs_index;
        u64 pebs_absolute_maximum;
        u64 pebs_interrupt_threshold;
        u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
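
/*
 * Sketch of how the fields interact (following the SDM DS-area layout):
 * hardware appends records at *_index, which advances from *_buffer_base
 * towards *_absolute_maximum; once *_index crosses *_interrupt_threshold,
 * a PMI is raised so software can drain the buffer. pebs_event_reset[]
 * holds the value re-armed into the counter after each PEBS record,
 * which is what lets PEBS auto-reload avoid a PMI per sample.
 */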

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t lock;   /* per-core: protect structure */
        u64            config; /* extra MSR config */
        u64            reg;    /* extra MSR number */
        atomic_t       ref;    /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account regs[EXTRA_REG_MAX];
        int               refcnt;  /* per-core: #HT threads */
        unsigned          core_id; /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t lock;

        struct intel_excl_states states[2];

        union {
                u16 has_exclusive[2];
                u32 exclusive_present;
        };

        int      refcnt;  /* per-core: #HT threads */
        unsigned core_id; /* per-core: core id */
};
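
/*
 * Note (annotation): has_exclusive[] is indexed by HT thread id;
 * overlaying it with exclusive_present lets the scheduler ask "does
 * either sibling hold exclusive counters?" with a single 32-bit read.
 */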

#define MAX_LBR_ENTRIES 32

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int enabled;

        int n_events; /* the # of events in the below arrays */
        int n_added;  /* the # last events in the below arrays;
                         they've never been enabled yet */
        int n_txn;    /* the # last events in the below arrays;
                         added in the current transaction */
        int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64 tags[X86_PMC_IDX_MAX];

        struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int n_excl; /* the number of exclusive events */

        unsigned int txn_flags;
        int is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store *ds;
        u64 pebs_enabled;

        /*
         * Intel LBR bits
         */
        int                      lbr_users;
        void                     *lbr_context;
        struct perf_branch_stack lbr_stack;
        struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
        struct er_account        *lbr_sel;
        u64                      br_sel;

        /*
         * Intel host/guest exclude bits
         */
        u64 intel_ctrl_guest_mask;
        u64 intel_ctrl_host_mask;
        struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64 intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * AMD specific bits
         */
        struct amd_nb *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64 perf_ctr_virt_mask;

        void *kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {  \
        { .idxmsk64 = (n) },                    \
        .code = (c),                            \
        .cmask = (m),                           \
        .weight = (w),                          \
        .overlap = (o),                         \
        .flags = f,                             \
}

#define EVENT_CONSTRAINT(c, n, m) \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n), \
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may then fail to schedule the events. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!), which
 * will dramatically increase scheduling cycles on an over-committed
 * system. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify an event from fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
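
/*
 * For illustration (values as used in the Intel core PMU constraint
 * tables): fixed counter n is bit 32+n of the index mask, so e.g.
 *   FIXED_EVENT_CONSTRAINT(0x00c0, 0) -- INST_RETIRED.ANY on fixed ctr 0
 *   FIXED_EVENT_CONSTRAINT(0x003c, 1) -- CPU_CLK_UNHALTED.CORE on fixed ctr 1
 */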

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, \
                           PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, \
                           PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, \
                           PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n, \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
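
/*
 * Sketch of the typical lookup over a constraint table (the pattern
 * used by x86_get_event_constraints(); 'event' stands in for the
 * perf_event being scheduled):
 *
 *   struct event_constraint *c;
 *
 *   for_each_event_constraint(c, x86_pmu.event_constraints) {
 *           if ((event->hw.config & c->cmask) == c->code)
 *                   return c;
 *   }
 *   return &unconstrained;
 */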

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
        unsigned int event;
        unsigned int msr;
        u64          config_mask;
        u64          valid_mask;
        int          idx; /* per_xxx->regs[] reg index */
        bool         extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                           \
        .msr = (ms),                            \
        .config_mask = (m),                     \
        .valid_mask = (vm),                     \
        .idx = EXTRA_REG_##i,                   \
        .extra_msr_access = true,               \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
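
/*
 * Example shape of a table (modeled on the Nehalem offcore setup; see
 * the model-specific init code for the real lists). Each entry ties an
 * event code to the extra MSR it needs, terminated by EVENT_EXTRA_END:
 *
 *   static struct extra_reg intel_nehalem_extra_regs[] = {
 *           INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *           EVENT_EXTRA_END
 *   };
 */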

union perf_capabilities {
        struct {
                u64 lbr_format:6;
                u64 pebs_trap:1;
                u64 pebs_arch_reg:1;
                u64 pebs_format:4;
                u64 smm_freeze:1;
                /*
                 * PMU supports separate counter range for writing
                 * values > 32bit.
                 */
                u64 full_width_write:1;
        };
        u64 capabilities;
};

struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
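
/*
 * Usage sketch: X86_CONFIG() assembles a raw event selector from named
 * bit-fields. For example, the pebs_aliases callbacks build the
 * PEBS-capable stand-in for cycles roughly like this (simplified; the
 * real callbacks also merge the remaining config bits):
 *
 *   u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 */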

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char *name;
        int        version;
        int        (*handle_irq)(struct pt_regs *);
        void       (*disable_all)(void);
        void       (*enable_all)(int added);
        void       (*enable)(struct perf_event *);
        void       (*disable)(struct perf_event *);
        int        (*hw_config)(struct perf_event *event);
        int        (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned   eventsel;
        unsigned   perfctr;
        int        (*addr_offset)(int index, bool eventsel);
        int        (*rdpmc_index)(int index);
        u64        (*event_map)(int);
        int        max_events;
        int        num_counters;
        int        num_counters_fixed;
        int        cntval_bits;
        u64        cntval_mask;
        union {
                unsigned long events_maskl;
                unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int        events_mask_len;
        int        apic;
        u64        max_period;
        struct event_constraint *
                   (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                            int idx,
                                            struct perf_event *event);

        void       (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                            struct perf_event *event);

        void       (*start_scheduling)(struct cpu_hw_events *cpuc);

        void       (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void       (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk    *quirks;
        int        perfctr_second_write;
        bool       late_ack;
        unsigned   (*limit_period)(struct perf_event *event, unsigned l);

        /*
         * sysfs attrs
         */
        int        attr_rdpmc_broken;
        int        attr_rdpmc;
        struct attribute **format_attrs;
        struct attribute **event_attrs;

        ssize_t    (*events_sysfs_show)(char *page, u64 config);
        struct attribute **cpu_events;

        /*
         * CPU Hotplug hooks
         */
        int        (*cpu_prepare)(int cpu);
        void       (*cpu_starting)(int cpu);
        void       (*cpu_dying)(int cpu);
        void       (*cpu_dead)(int cpu);

        void       (*check_microcode)(void);
        void       (*sched_task)(struct perf_event_context *ctx,
                                 bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64        intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int bts            :1,
                     bts_active     :1,
                     pebs           :1,
                     pebs_active    :1,
                     pebs_broken    :1,
                     pebs_prec_dist :1;
        int        pebs_record_size;
        int        pebs_buffer_size;
        void       (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void       (*pebs_aliases)(struct perf_event *event);
        int        max_pebs_events;
        unsigned long free_running_flags;

        /*
         * Intel LBR
         */
        unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
        int        lbr_nr;                       /* hardware stack size */
        u64        lbr_sel_mask;                 /* LBR_SELECT valid bits */
        const int  *lbr_sel_map;                 /* lbr_select mappings */
        bool       lbr_double_abort;             /* duplicated lbr aborts */

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t   lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * AMD bits
         */
        unsigned int amd_nb_constraints : 1;

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
        unsigned int flags;

        /*
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
        u64 lbr_from[MAX_LBR_ENTRIES];
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
        int lbr_callstack_users;
        int lbr_stack_state;
};

#define x86_add_quirk(func_)                                            \
do {                                                                    \
        static struct x86_pmu_quirk __quirk __initdata = {              \
                .func = func_,                                          \
        };                                                              \
        __quirk.next = x86_pmu.quirks;                                  \
        x86_pmu.quirks = &__quirk;                                      \
} while (0)
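
/*
 * Usage sketch: model-specific init code registers quirks like
 *
 *   x86_add_quirk(intel_clovertown_quirk);
 *
 * and the generic init path then walks x86_pmu.quirks and invokes each
 * func() once. Since the node is __initdata, the macro must only be
 * used from __init context.
 */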

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1     0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS    0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED  0x8 /* exclusive counter active */

#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = PERF_COUNT_HW_##_id,                          \
        .event_str      = NULL,                                         \
};

#define EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = str,                                          \
};
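
/*
 * Usage sketch: either map a generic hardware event id, or hand sysfs a
 * literal event string (both forms appear in the Intel event lists):
 *
 *   EVENT_ATTR(cpu-cycles, CPU_CYCLES);
 *   EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */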

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
        return x86_pmu.lbr_sel_map &&
               x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + (x86_pmu.addr_offset ?
                                   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + (x86_pmu.addr_offset ?
                                  x86_pmu.addr_offset(index, false) : index);
}
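
/*
 * Worked example (typical Intel layout, for illustration): with
 * x86_pmu.eventsel == MSR_ARCH_PERFMON_EVENTSEL0 (0x186),
 * x86_pmu.perfctr == MSR_ARCH_PERFMON_PERFCTR0 (0xc1) and no
 * addr_offset() hook, counter 2 is programmed via MSR 0x188 and read
 * via MSR 0xc3. PMUs whose MSRs are not contiguous (e.g. AMD Fam15h,
 * which strides by 2) supply an addr_offset() hook instead.
 */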

static inline int x86_pmu_rdpmc_index(int index)
{
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
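
/*
 * Note (annotation): perf_ctr_virt_mask is normally 0, so the write
 * above is simply config | enable_mask. The AMD code sets it to the
 * host-only bit while SVM is disabled so that host-only events keep
 * counting; see amd_pmu_enable_virt()/amd_pmu_disable_virt().
 */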

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
                       int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        return ip > PAGE_OFFSET;
#else
        return (long)ip < 0;
#endif
}
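
/*
 * Illustration: on 64-bit, kernel text such as 0xffffffff81000000 has
 * the sign bit set, so kernel_ip() is true, while a user address like
 * 0x00007f0000001000 is not; on 32-bit the split is at PAGE_OFFSET
 * (0xc0000000 with the default 3G/1G layout).
 */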

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address; there is
 * not much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !event->attr.freq && event->hw.sample_period == 1)
                return true;

        return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
        return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
        return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
        return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
        return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */