arch/x86/events/intel/lbr.c
1 #include <linux/perf_event.h>
2 #include <linux/types.h>
3
4 #include <asm/perf_event.h>
5 #include <asm/msr.h>
6 #include <asm/insn.h>
7
8 #include "../perf_event.h"
9
10 enum {
11 LBR_FORMAT_32 = 0x00,
12 LBR_FORMAT_LIP = 0x01,
13 LBR_FORMAT_EIP = 0x02,
14 LBR_FORMAT_EIP_FLAGS = 0x03,
15 LBR_FORMAT_EIP_FLAGS2 = 0x04,
16 LBR_FORMAT_INFO = 0x05,
17 LBR_FORMAT_TIME = 0x06,
18 LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
19 };
20
21 static const enum {
22 LBR_EIP_FLAGS = 1,
23 LBR_TSX = 2,
24 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
25 [LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS,
26 [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
27 };
28
29 /*
30 * Intel LBR_SELECT bits
31 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
32 *
33 * Hardware branch filter (not available on all CPUs)
34 */
35 #define LBR_KERNEL_BIT 0 /* do not capture at ring0 */
36 #define LBR_USER_BIT 1 /* do not capture at ring > 0 */
37 #define LBR_JCC_BIT 2 /* do not capture conditional branches */
38 #define LBR_REL_CALL_BIT 3 /* do not capture relative calls */
39 #define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */
40 #define LBR_RETURN_BIT 5 /* do not capture near returns */
41 #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
42 #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
43 #define LBR_FAR_BIT 8 /* do not capture far branches */
44 #define LBR_CALL_STACK_BIT 9 /* enable call stack */
45
46 /*
47 * The following bit only exists in Linux; we mask it out before writing it to
48 * the actual MSR. But it helps the perf constraint code understand
49 * that this is a separate configuration.
50 */
51 #define LBR_NO_INFO_BIT 63 /* don't read LBR_INFO. */
52
53 #define LBR_KERNEL (1 << LBR_KERNEL_BIT)
54 #define LBR_USER (1 << LBR_USER_BIT)
55 #define LBR_JCC (1 << LBR_JCC_BIT)
56 #define LBR_REL_CALL (1 << LBR_REL_CALL_BIT)
57 #define LBR_IND_CALL (1 << LBR_IND_CALL_BIT)
58 #define LBR_RETURN (1 << LBR_RETURN_BIT)
59 #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
60 #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
61 #define LBR_FAR (1 << LBR_FAR_BIT)
62 #define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
63 #define LBR_NO_INFO (1ULL << LBR_NO_INFO_BIT)
64
65 #define LBR_PLM (LBR_KERNEL | LBR_USER)
66
67 #define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
68 #define LBR_NOT_SUPP -1 /* LBR filter not supported */
69 #define LBR_IGN 0 /* ignored */
70
71 #define LBR_ANY \
72 (LBR_JCC |\
73 LBR_REL_CALL |\
74 LBR_IND_CALL |\
75 LBR_RETURN |\
76 LBR_REL_JMP |\
77 LBR_IND_JMP |\
78 LBR_FAR)
79
80 #define LBR_FROM_FLAG_MISPRED BIT_ULL(63)
81 #define LBR_FROM_FLAG_IN_TX BIT_ULL(62)
82 #define LBR_FROM_FLAG_ABORT BIT_ULL(61)
83
84 #define LBR_FROM_SIGNEXT_2MSB (BIT_ULL(60) | BIT_ULL(59))
85
86 /*
87 * x86 control flow change classification
88 * x86 control flow changes include branches, interrupts, traps, faults
89 */
90 enum {
91 X86_BR_NONE = 0, /* unknown */
92
93 X86_BR_USER = 1 << 0, /* branch target is user */
94 X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
95
96 X86_BR_CALL = 1 << 2, /* call */
97 X86_BR_RET = 1 << 3, /* return */
98 X86_BR_SYSCALL = 1 << 4, /* syscall */
99 X86_BR_SYSRET = 1 << 5, /* syscall return */
100 X86_BR_INT = 1 << 6, /* sw interrupt */
101 X86_BR_IRET = 1 << 7, /* return from interrupt */
102 X86_BR_JCC = 1 << 8, /* conditional */
103 X86_BR_JMP = 1 << 9, /* jump */
104 X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
105 X86_BR_IND_CALL = 1 << 11,/* indirect calls */
106 X86_BR_ABORT = 1 << 12,/* transaction abort */
107 X86_BR_IN_TX = 1 << 13,/* in transaction */
108 X86_BR_NO_TX = 1 << 14,/* not in transaction */
109 X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
110 X86_BR_CALL_STACK = 1 << 16,/* call stack */
111 X86_BR_IND_JMP = 1 << 17,/* indirect jump */
112
113 X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */
114
115 };
116
117 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
118 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
119
120 #define X86_BR_ANY \
121 (X86_BR_CALL |\
122 X86_BR_RET |\
123 X86_BR_SYSCALL |\
124 X86_BR_SYSRET |\
125 X86_BR_INT |\
126 X86_BR_IRET |\
127 X86_BR_JCC |\
128 X86_BR_JMP |\
129 X86_BR_IRQ |\
130 X86_BR_ABORT |\
131 X86_BR_IND_CALL |\
132 X86_BR_IND_JMP |\
133 X86_BR_ZERO_CALL)
134
135 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
136
137 #define X86_BR_ANY_CALL \
138 (X86_BR_CALL |\
139 X86_BR_IND_CALL |\
140 X86_BR_ZERO_CALL |\
141 X86_BR_SYSCALL |\
142 X86_BR_IRQ |\
143 X86_BR_INT)
144
145 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
146
147 /*
148 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
149 * otherwise it becomes nearly impossible to get a reliable stack.
150 */
151
152 static void __intel_pmu_lbr_enable(bool pmi)
153 {
154 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
155 u64 debugctl, lbr_select = 0, orig_debugctl;
156
157 /*
158 * No need to unfreeze manually, as v4 can do that as part
159 * of the GLOBAL_STATUS ack.
160 */
161 if (pmi && x86_pmu.version >= 4)
162 return;
163
164 /*
165 * No need to reprogram LBR_SELECT in a PMI, as it
166 * did not change.
167 */
168 if (cpuc->lbr_sel)
169 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
170 if (!pmi && cpuc->lbr_sel)
171 wrmsrl(MSR_LBR_SELECT, lbr_select);
172
173 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
174 orig_debugctl = debugctl;
175 debugctl |= DEBUGCTLMSR_LBR;
176 /*
177 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
178 * If FREEZE_LBRS_ON_PMI is set, a PMI occurring near call/return
179 * instructions may cause a superfluous increase/decrease of LBR_TOS.
180 */
181 if (!(lbr_select & LBR_CALL_STACK))
182 debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
183 if (orig_debugctl != debugctl)
184 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
185 }
186
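/* Stop LBR recording: clear the LBR and FREEZE_LBRS_ON_PMI bits in DEBUGCTL. */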
187 static void __intel_pmu_lbr_disable(void)
188 {
189 u64 debugctl;
190
191 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
192 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
193 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
194 }
195
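/*
 * LBR_FORMAT_32 packs the from/to pair into a single MSR per entry
 * (see intel_pmu_lbr_read_32()), so clearing the FROM MSRs resets the
 * whole stack.
 */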
196 static void intel_pmu_lbr_reset_32(void)
197 {
198 int i;
199
200 for (i = 0; i < x86_pmu.lbr_nr; i++)
201 wrmsrl(x86_pmu.lbr_from + i, 0);
202 }
203
204 static void intel_pmu_lbr_reset_64(void)
205 {
206 int i;
207
208 for (i = 0; i < x86_pmu.lbr_nr; i++) {
209 wrmsrl(x86_pmu.lbr_from + i, 0);
210 wrmsrl(x86_pmu.lbr_to + i, 0);
211 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
212 wrmsrl(MSR_LBR_INFO_0 + i, 0);
213 }
214 }
215
216 void intel_pmu_lbr_reset(void)
217 {
218 if (!x86_pmu.lbr_nr)
219 return;
220
221 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
222 intel_pmu_lbr_reset_32();
223 else
224 intel_pmu_lbr_reset_64();
225 }
226
227 /*
228 * TOS = most recently recorded branch
229 */
230 static inline u64 intel_pmu_lbr_tos(void)
231 {
232 u64 tos;
233
234 rdmsrl(x86_pmu.lbr_tos, tos);
235 return tos;
236 }
237
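/* State of the LBR stack saved in a task context (lbr_stack_state). */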
238 enum {
239 LBR_NONE,
240 LBR_VALID,
241 };
242
243 /*
244 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
245 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
246 * TSX is not supported they have no consistent behavior:
247 *
248 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
249 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
250 * part of the sign extension.
251 *
252 * Therefore, if:
253 *
254 * 1) LBR has TSX format
255 * 2) CPU has no TSX support enabled
256 *
257 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
258 * value from rdmsr() must be converted to have a 61-bit sign extension,
259 * ignoring the TSX flags.
260 */
261 static inline bool lbr_from_signext_quirk_needed(void)
262 {
263 int lbr_format = x86_pmu.intel_cap.lbr_format;
264 bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
265 boot_cpu_has(X86_FEATURE_RTM);
266
267 return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
268 }
269
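/*
 * Enabled at init time (see intel_pmu_lbr_init_hsw()) when the LBR format
 * carries TSX flag bits but the CPU has no TSX support.
 */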
270 DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
271
272 /* If quirk is enabled, ensure sign extension is 63 bits: */
273 inline u64 lbr_from_signext_quirk_wr(u64 val)
274 {
275 if (static_branch_unlikely(&lbr_from_quirk_key)) {
276 /*
277 * Sign extend into bits 61:62 while preserving bit 63.
278 *
279 * Quirk is enabled when TSX is disabled. Therefore TSX bits
280 * in val are always OFF and must be changed to be sign
281 * extension bits. Since bits 59:60 are guaranteed to be
282 * part of the sign extension bits, we can just copy them
283 * to 61:62.
284 */
285 val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
286 }
287 return val;
288 }
289
290 /*
291 * If quirk is needed, ensure sign extension is 61 bits:
292 */
293 static u64 lbr_from_signext_quirk_rd(u64 val)
294 {
295 if (static_branch_unlikely(&lbr_from_quirk_key)) {
296 /*
297 * Quirk is on when TSX is not enabled. Therefore TSX
298 * flags must be read as OFF.
299 */
300 val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
301 }
302 return val;
303 }
304
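/*
 * MSR accessors for the LBR FROM/TO registers; the FROM side applies the
 * sign-extension quirk on both write and read.
 */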
305 static inline void wrlbr_from(unsigned int idx, u64 val)
306 {
307 val = lbr_from_signext_quirk_wr(val);
308 wrmsrl(x86_pmu.lbr_from + idx, val);
309 }
310
311 static inline void wrlbr_to(unsigned int idx, u64 val)
312 {
313 wrmsrl(x86_pmu.lbr_to + idx, val);
314 }
315
316 static inline u64 rdlbr_from(unsigned int idx)
317 {
318 u64 val;
319
320 rdmsrl(x86_pmu.lbr_from + idx, val);
321
322 return lbr_from_signext_quirk_rd(val);
323 }
324
325 static inline u64 rdlbr_to(unsigned int idx)
326 {
327 u64 val;
328
329 rdmsrl(x86_pmu.lbr_to + idx, val);
330
331 return val;
332 }
333
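/*
 * Reload a previously saved LBR call stack when its task is scheduled back
 * in; fall back to a plain reset if there is nothing valid to restore.
 */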
334 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
335 {
336 int i;
337 unsigned lbr_idx, mask;
338 u64 tos;
339
340 if (task_ctx->lbr_callstack_users == 0 ||
341 task_ctx->lbr_stack_state == LBR_NONE) {
342 intel_pmu_lbr_reset();
343 return;
344 }
345
346 mask = x86_pmu.lbr_nr - 1;
347 tos = task_ctx->tos;
348 for (i = 0; i < tos; i++) {
349 lbr_idx = (tos - i) & mask;
350 wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
351 wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
352
353 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
354 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
355 }
356 wrmsrl(x86_pmu.lbr_tos, tos);
357 task_ctx->lbr_stack_state = LBR_NONE;
358 }
359
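/*
 * Snapshot the current LBR call stack into the task context at sched-out
 * so it can be restored later.
 */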
360 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
361 {
362 unsigned lbr_idx, mask;
363 u64 tos;
364 int i;
365
366 if (task_ctx->lbr_callstack_users == 0) {
367 task_ctx->lbr_stack_state = LBR_NONE;
368 return;
369 }
370
371 mask = x86_pmu.lbr_nr - 1;
372 tos = intel_pmu_lbr_tos();
373 for (i = 0; i < tos; i++) {
374 lbr_idx = (tos - i) & mask;
375 task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
376 task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
377 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
378 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
379 }
380 task_ctx->tos = tos;
381 task_ctx->lbr_stack_state = LBR_VALID;
382 }
383
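/*
 * Context switch hook: save/restore the LBR call stack when a task context
 * is available, otherwise wipe the LBR on sched-in.
 */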
384 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
385 {
386 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
387 struct x86_perf_task_context *task_ctx;
388
389 if (!cpuc->lbr_users)
390 return;
391
392 /*
393 * If LBR callstack feature is enabled and the stack was saved when
394 * the task was scheduled out, restore the stack. Otherwise flush
395 * the LBR stack.
396 */
397 task_ctx = ctx ? ctx->task_ctx_data : NULL;
398 if (task_ctx) {
399 if (sched_in)
400 __intel_pmu_lbr_restore(task_ctx);
401 else
402 __intel_pmu_lbr_save(task_ctx);
403 return;
404 }
405
406 /*
407 * Since a context switch can flip the address space and LBR entries
408 * are not tagged with an identifier, we need to wipe the LBR, even for
409 * per-cpu events. You simply cannot resolve the branches from the old
410 * address space.
411 */
412 if (sched_in)
413 intel_pmu_lbr_reset();
414 }
415
416 static inline bool branch_user_callstack(unsigned br_sel)
417 {
418 return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
419 }
420
421 void intel_pmu_lbr_add(struct perf_event *event)
422 {
423 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
424 struct x86_perf_task_context *task_ctx;
425
426 if (!x86_pmu.lbr_nr)
427 return;
428
429 cpuc->br_sel = event->hw.branch_reg.reg;
430
431 if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
432 task_ctx = event->ctx->task_ctx_data;
433 task_ctx->lbr_callstack_users++;
434 }
435
436 /*
437 * Request pmu::sched_task() callback, which will fire inside the
438 * regular perf event scheduling, so that call will:
439 *
440 * - restore or wipe; when LBR-callstack,
441 * - wipe; otherwise,
442 *
443 * when this is from __perf_event_task_sched_in().
444 *
445 * However, if this is from perf_install_in_context(), no such callback
446 * will follow and we'll need to reset the LBR here if this is the
447 * first LBR event.
448 *
449 * The problem is, we cannot tell these cases apart... but we can
450 * exclude the biggest chunk of cases by looking at
451 * event->total_time_running. An event that has accrued runtime cannot
452 * be 'new'. Conversely, a new event can get installed through the
453 * context switch path for the first time.
454 */
455 perf_sched_cb_inc(event->ctx->pmu);
456 if (!cpuc->lbr_users++ && !event->total_time_running)
457 intel_pmu_lbr_reset();
458 }
459
460 void intel_pmu_lbr_del(struct perf_event *event)
461 {
462 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
463 struct x86_perf_task_context *task_ctx;
464
465 if (!x86_pmu.lbr_nr)
466 return;
467
468 if (branch_user_callstack(cpuc->br_sel) &&
469 event->ctx->task_ctx_data) {
470 task_ctx = event->ctx->task_ctx_data;
471 task_ctx->lbr_callstack_users--;
472 }
473
474 cpuc->lbr_users--;
475 WARN_ON_ONCE(cpuc->lbr_users < 0);
476 perf_sched_cb_dec(event->ctx->pmu);
477 }
478
479 void intel_pmu_lbr_enable_all(bool pmi)
480 {
481 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
482
483 if (cpuc->lbr_users)
484 __intel_pmu_lbr_enable(pmi);
485 }
486
487 void intel_pmu_lbr_disable_all(void)
488 {
489 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
490
491 if (cpuc->lbr_users)
492 __intel_pmu_lbr_disable();
493 }
494
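/*
 * Read out the LBR stack in the legacy 32-bit format: one MSR per entry
 * holding a packed from/to pair and no flag bits.
 */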
495 static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
496 {
497 unsigned long mask = x86_pmu.lbr_nr - 1;
498 u64 tos = intel_pmu_lbr_tos();
499 int i;
500
501 for (i = 0; i < x86_pmu.lbr_nr; i++) {
502 unsigned long lbr_idx = (tos - i) & mask;
503 union {
504 struct {
505 u32 from;
506 u32 to;
507 };
508 u64 lbr;
509 } msr_lastbranch;
510
511 rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
512
513 cpuc->lbr_entries[i].from = msr_lastbranch.from;
514 cpuc->lbr_entries[i].to = msr_lastbranch.to;
515 cpuc->lbr_entries[i].mispred = 0;
516 cpuc->lbr_entries[i].predicted = 0;
517 cpuc->lbr_entries[i].in_tx = 0;
518 cpuc->lbr_entries[i].abort = 0;
519 cpuc->lbr_entries[i].cycles = 0;
520 cpuc->lbr_entries[i].type = 0;
521 cpuc->lbr_entries[i].reserved = 0;
522 }
523 cpuc->lbr_stack.nr = i;
524 }
525
526 /*
527 * Due to the lack of segmentation in Linux, the effective address (offset)
528 * is the same as the linear address, allowing us to merge the LIP and EIP
529 * LBR formats.
530 */
531 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
532 {
533 bool need_info = false;
534 unsigned long mask = x86_pmu.lbr_nr - 1;
535 int lbr_format = x86_pmu.intel_cap.lbr_format;
536 u64 tos = intel_pmu_lbr_tos();
537 int i;
538 int out = 0;
539 int num = x86_pmu.lbr_nr;
540
541 if (cpuc->lbr_sel) {
542 need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
543 if (cpuc->lbr_sel->config & LBR_CALL_STACK)
544 num = tos;
545 }
546
547 for (i = 0; i < num; i++) {
548 unsigned long lbr_idx = (tos - i) & mask;
549 u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
550 int skip = 0;
551 u16 cycles = 0;
552 int lbr_flags = lbr_desc[lbr_format];
553
554 from = rdlbr_from(lbr_idx);
555 to = rdlbr_to(lbr_idx);
556
557 if (lbr_format == LBR_FORMAT_INFO && need_info) {
558 u64 info;
559
560 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
561 mis = !!(info & LBR_INFO_MISPRED);
562 pred = !mis;
563 in_tx = !!(info & LBR_INFO_IN_TX);
564 abort = !!(info & LBR_INFO_ABORT);
565 cycles = (info & LBR_INFO_CYCLES);
566 }
567
568 if (lbr_format == LBR_FORMAT_TIME) {
569 mis = !!(from & LBR_FROM_FLAG_MISPRED);
570 pred = !mis;
571 skip = 1;
572 cycles = ((to >> 48) & LBR_INFO_CYCLES);
573
574 to = (u64)((((s64)to) << 16) >> 16);
575 }
576
577 if (lbr_flags & LBR_EIP_FLAGS) {
578 mis = !!(from & LBR_FROM_FLAG_MISPRED);
579 pred = !mis;
580 skip = 1;
581 }
582 if (lbr_flags & LBR_TSX) {
583 in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
584 abort = !!(from & LBR_FROM_FLAG_ABORT);
585 skip = 3;
586 }
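/* Shift out the flag bits consumed above and sign-extend the address. */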
587 from = (u64)((((s64)from) << skip) >> skip);
588
589 /*
590 * Some CPUs report duplicated abort records,
591 * with the second entry not having an abort bit set.
592 * Skip them here. This loop runs backwards,
593 * so we need to undo the previous record.
594 * If the abort just happened outside the window,
595 * the extra entry cannot be removed.
596 */
597 if (abort && x86_pmu.lbr_double_abort && out > 0)
598 out--;
599
600 cpuc->lbr_entries[out].from = from;
601 cpuc->lbr_entries[out].to = to;
602 cpuc->lbr_entries[out].mispred = mis;
603 cpuc->lbr_entries[out].predicted = pred;
604 cpuc->lbr_entries[out].in_tx = in_tx;
605 cpuc->lbr_entries[out].abort = abort;
606 cpuc->lbr_entries[out].cycles = cycles;
607 cpuc->lbr_entries[out].type = 0;
608 cpuc->lbr_entries[out].reserved = 0;
609 out++;
610 }
611 cpuc->lbr_stack.nr = out;
612 }
613
614 void intel_pmu_lbr_read(void)
615 {
616 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
617
618 if (!cpuc->lbr_users)
619 return;
620
621 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
622 intel_pmu_lbr_read_32(cpuc);
623 else
624 intel_pmu_lbr_read_64(cpuc);
625
626 intel_pmu_lbr_filter(cpuc);
627 }
628
629 /*
630 * SW filter is used:
631 * - in case there is no HW filter
632 * - in case the HW filter has errata or limitations
633 */
634 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
635 {
636 u64 br_type = event->attr.branch_sample_type;
637 int mask = 0;
638
639 if (br_type & PERF_SAMPLE_BRANCH_USER)
640 mask |= X86_BR_USER;
641
642 if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
643 mask |= X86_BR_KERNEL;
644
645 /* we ignore BRANCH_HV here */
646
647 if (br_type & PERF_SAMPLE_BRANCH_ANY)
648 mask |= X86_BR_ANY;
649
650 if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
651 mask |= X86_BR_ANY_CALL;
652
653 if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
654 mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
655
656 if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
657 mask |= X86_BR_IND_CALL;
658
659 if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
660 mask |= X86_BR_ABORT;
661
662 if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
663 mask |= X86_BR_IN_TX;
664
665 if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
666 mask |= X86_BR_NO_TX;
667
668 if (br_type & PERF_SAMPLE_BRANCH_COND)
669 mask |= X86_BR_JCC;
670
671 if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
672 if (!x86_pmu_has_lbr_callstack())
673 return -EOPNOTSUPP;
674 if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
675 return -EINVAL;
676 mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
677 X86_BR_CALL_STACK;
678 }
679
680 if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
681 mask |= X86_BR_IND_JMP;
682
683 if (br_type & PERF_SAMPLE_BRANCH_CALL)
684 mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
685
686 if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
687 mask |= X86_BR_TYPE_SAVE;
688
689 /*
690 * Stash the actual user request into reg; it may
691 * be used by fixup code for some CPUs.
692 */
693 event->hw.branch_reg.reg = mask;
694 return 0;
695 }
696
697 /*
698 * Set up the HW LBR filter.
699 * Used only when available; it may not be enough to disambiguate
700 * all branches and may need the help of the SW filter.
701 */
702 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
703 {
704 struct hw_perf_event_extra *reg;
705 u64 br_type = event->attr.branch_sample_type;
706 u64 mask = 0, v;
707 int i;
708
709 for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
710 if (!(br_type & (1ULL << i)))
711 continue;
712
713 v = x86_pmu.lbr_sel_map[i];
714 if (v == LBR_NOT_SUPP)
715 return -EOPNOTSUPP;
716
717 if (v != LBR_IGN)
718 mask |= v;
719 }
720
721 reg = &event->hw.branch_reg;
722 reg->idx = EXTRA_REG_LBR;
723
724 /*
725 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
726 * in suppress mode. So LBR_SELECT should be set to
727 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
728 * But the 10th bit LBR_CALL_STACK does not operate
729 * in suppress mode.
730 */
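/*
 * E.g. if only conditional branches are requested, LBR_JCC is set in
 * 'mask'; since the LBR_SELECT bits mean "do not capture", the XOR below
 * leaves the Jcc suppress bit clear and sets the suppress bits for all
 * other branch types.
 */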
731 reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
732
733 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
734 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
735 (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
736 reg->config |= LBR_NO_INFO;
737
738 return 0;
739 }
740
741 int intel_pmu_setup_lbr_filter(struct perf_event *event)
742 {
743 int ret = 0;
744
745 /*
746 * no LBR on this PMU
747 */
748 if (!x86_pmu.lbr_nr)
749 return -EOPNOTSUPP;
750
751 /*
752 * setup SW LBR filter
753 */
754 ret = intel_pmu_setup_sw_lbr_filter(event);
755 if (ret)
756 return ret;
757
758 /*
759 * setup HW LBR filter, if any
760 */
761 if (x86_pmu.lbr_sel_map)
762 ret = intel_pmu_setup_hw_lbr_filter(event);
763
764 return ret;
765 }
766
767 /*
768 * Return the type of control flow change at address "from";
769 * the instruction is not necessarily a branch (e.g. in case of an interrupt).
770 *
771 * The branch type returned also includes the priv level of the
772 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
773 *
774 * If a branch type is unknown OR the instruction cannot be
775 * decoded (e.g., text page not present), then X86_BR_NONE is
776 * returned.
777 */
778 static int branch_type(unsigned long from, unsigned long to, int abort)
779 {
780 struct insn insn;
781 void *addr;
782 int bytes_read, bytes_left;
783 int ret = X86_BR_NONE;
784 int ext, to_plm, from_plm;
785 u8 buf[MAX_INSN_SIZE];
786 int is64 = 0;
787
788 to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
789 from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
790
791 /*
792 * may be zero if the LBR did not fill up after a reset by the time
793 * we get a PMU interrupt
794 */
795 if (from == 0 || to == 0)
796 return X86_BR_NONE;
797
798 if (abort)
799 return X86_BR_ABORT | to_plm;
800
801 if (from_plm == X86_BR_USER) {
802 /*
803 * can happen if measuring at the user level only
804 * and we interrupt in a kernel thread, e.g., idle.
805 */
806 if (!current->mm)
807 return X86_BR_NONE;
808
809 /* may fail if text not present */
810 bytes_left = copy_from_user_nmi(buf, (void __user *)from,
811 MAX_INSN_SIZE);
812 bytes_read = MAX_INSN_SIZE - bytes_left;
813 if (!bytes_read)
814 return X86_BR_NONE;
815
816 addr = buf;
817 } else {
818 /*
819 * The LBR logs any address in the IP, even if the IP just
820 * faulted. This means userspace can control the from address.
821 * Ensure we don't blindly read any address by validating it is
822 * a known text address.
823 */
824 if (kernel_text_address(from)) {
825 addr = (void *)from;
826 /*
827 * Assume we can get the maximum possible size
828 * when grabbing kernel data. This is not
829 * _strictly_ true since we could possibly be
830 * executing up next to a memory hole, but
831 * it is very unlikely to be a problem.
832 */
833 bytes_read = MAX_INSN_SIZE;
834 } else {
835 return X86_BR_NONE;
836 }
837 }
838
839 /*
840 * decoder needs to know the ABI especially
841 * on 64-bit systems running 32-bit apps
842 */
843 #ifdef CONFIG_X86_64
844 is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
845 #endif
846 insn_init(&insn, addr, bytes_read, is64);
847 insn_get_opcode(&insn);
848 if (!insn.opcode.got)
849 return X86_BR_ABORT;
850
851 switch (insn.opcode.bytes[0]) {
852 case 0xf:
853 switch (insn.opcode.bytes[1]) {
854 case 0x05: /* syscall */
855 case 0x34: /* sysenter */
856 ret = X86_BR_SYSCALL;
857 break;
858 case 0x07: /* sysret */
859 case 0x35: /* sysexit */
860 ret = X86_BR_SYSRET;
861 break;
862 case 0x80 ... 0x8f: /* conditional */
863 ret = X86_BR_JCC;
864 break;
865 default:
866 ret = X86_BR_NONE;
867 }
868 break;
869 case 0x70 ... 0x7f: /* conditional */
870 ret = X86_BR_JCC;
871 break;
872 case 0xc2: /* near ret */
873 case 0xc3: /* near ret */
874 case 0xca: /* far ret */
875 case 0xcb: /* far ret */
876 ret = X86_BR_RET;
877 break;
878 case 0xcf: /* iret */
879 ret = X86_BR_IRET;
880 break;
881 case 0xcc ... 0xce: /* int */
882 ret = X86_BR_INT;
883 break;
884 case 0xe8: /* call near rel */
885 insn_get_immediate(&insn);
886 if (insn.immediate1.value == 0) {
887 /* zero length call */
888 ret = X86_BR_ZERO_CALL;
889 break;
890 }
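/* Non-zero-length near call: fall through to the common call case. */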
891 case 0x9a: /* call far absolute */
892 ret = X86_BR_CALL;
893 break;
894 case 0xe0 ... 0xe3: /* loop jmp */
895 ret = X86_BR_JCC;
896 break;
897 case 0xe9 ... 0xeb: /* jmp */
898 ret = X86_BR_JMP;
899 break;
900 case 0xff: /* call near absolute, call far absolute ind */
901 insn_get_modrm(&insn);
902 ext = (insn.modrm.bytes[0] >> 3) & 0x7;
903 switch (ext) {
904 case 2: /* near ind call */
905 case 3: /* far ind call */
906 ret = X86_BR_IND_CALL;
907 break;
908 case 4:
909 case 5:
910 ret = X86_BR_IND_JMP;
911 break;
912 }
913 break;
914 default:
915 ret = X86_BR_NONE;
916 }
917 /*
918 * Interrupts, traps and faults (and thus ring transitions) may
919 * occur on any instruction. Thus, to classify them correctly,
920 * we need to first look at the from and to priv levels. If they
921 * are different and to is in the kernel, then it indicates
922 * a ring transition. If the from instruction is not a ring
923 * transition instr (syscall, sysenter, int), then it means
924 * it was an irq, trap or fault.
925 *
926 * we have no way of detecting kernel to kernel faults.
927 */
928 if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
929 && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
930 ret = X86_BR_IRQ;
931
932 /*
933 * The branch priv level is determined by the target, as
934 * is done by HW when LBR_SELECT is implemented.
935 */
936 if (ret != X86_BR_NONE)
937 ret |= to_plm;
938
939 return ret;
940 }
941
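/*
 * Map the software X86_BR_* classification (excluding the priv level bits)
 * onto the generic PERF_BR_* branch types reported to userspace.
 */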
942 #define X86_BR_TYPE_MAP_MAX 16
943
944 static int branch_map[X86_BR_TYPE_MAP_MAX] = {
945 PERF_BR_CALL, /* X86_BR_CALL */
946 PERF_BR_RET, /* X86_BR_RET */
947 PERF_BR_SYSCALL, /* X86_BR_SYSCALL */
948 PERF_BR_SYSRET, /* X86_BR_SYSRET */
949 PERF_BR_UNKNOWN, /* X86_BR_INT */
950 PERF_BR_UNKNOWN, /* X86_BR_IRET */
951 PERF_BR_COND, /* X86_BR_JCC */
952 PERF_BR_UNCOND, /* X86_BR_JMP */
953 PERF_BR_UNKNOWN, /* X86_BR_IRQ */
954 PERF_BR_IND_CALL, /* X86_BR_IND_CALL */
955 PERF_BR_UNKNOWN, /* X86_BR_ABORT */
956 PERF_BR_UNKNOWN, /* X86_BR_IN_TX */
957 PERF_BR_UNKNOWN, /* X86_BR_NO_TX */
958 PERF_BR_CALL, /* X86_BR_ZERO_CALL */
959 PERF_BR_UNKNOWN, /* X86_BR_CALL_STACK */
960 PERF_BR_IND, /* X86_BR_IND_JMP */
961 };
962
963 static int
964 common_branch_type(int type)
965 {
966 int i;
967
968 type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
969
970 if (type) {
971 i = __ffs(type);
972 if (i < X86_BR_TYPE_MAP_MAX)
973 return branch_map[i];
974 }
975
976 return PERF_BR_UNKNOWN;
977 }
978
979 /*
980 * Implement the actual branch filter based on user demand.
981 * Hardware may not exactly satisfy that request, thus
982 * we need to inspect opcodes. Mismatched branches are
983 * discarded. Therefore, the number of branches returned
984 * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
985 */
986 static void
987 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
988 {
989 u64 from, to;
990 int br_sel = cpuc->br_sel;
991 int i, j, type;
992 bool compress = false;
993
994 /* if sampling all branches, then nothing to filter */
995 if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
996 ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
997 return;
998
999 for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1000
1001 from = cpuc->lbr_entries[i].from;
1002 to = cpuc->lbr_entries[i].to;
1003
1004 type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1005 if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1006 if (cpuc->lbr_entries[i].in_tx)
1007 type |= X86_BR_IN_TX;
1008 else
1009 type |= X86_BR_NO_TX;
1010 }
1011
1012 /* if type does not correspond, then discard */
1013 if (type == X86_BR_NONE || (br_sel & type) != type) {
1014 cpuc->lbr_entries[i].from = 0;
1015 compress = true;
1016 }
1017
1018 if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1019 cpuc->lbr_entries[i].type = common_branch_type(type);
1020 }
1021
1022 if (!compress)
1023 return;
1024
1025 /* remove all entries with from=0 */
1026 for (i = 0; i < cpuc->lbr_stack.nr; ) {
1027 if (!cpuc->lbr_entries[i].from) {
1028 j = i;
1029 while (++j < cpuc->lbr_stack.nr)
1030 cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1031 cpuc->lbr_stack.nr--;
1032 if (!cpuc->lbr_entries[i].from)
1033 continue;
1034 }
1035 i++;
1036 }
1037 }
1038
1039 /*
1040 * Map interface branch filters onto LBR filters
1041 */
1042 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1043 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1044 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1045 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1046 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1047 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
1048 | LBR_IND_JMP | LBR_FAR,
1049 /*
1050 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1051 */
1052 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1053 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1054 /*
1055 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1056 */
1057 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1058 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1059 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1060 };
1061
1062 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1063 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1064 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1065 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1066 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1067 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1068 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1069 | LBR_FAR,
1070 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1071 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1072 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1073 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1074 };
1075
1076 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1077 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1078 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1079 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1080 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1081 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1082 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1083 | LBR_FAR,
1084 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1085 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1086 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1087 | LBR_RETURN | LBR_CALL_STACK,
1088 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1089 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1090 };
1091
1092 /* core */
1093 void __init intel_pmu_lbr_init_core(void)
1094 {
1095 x86_pmu.lbr_nr = 4;
1096 x86_pmu.lbr_tos = MSR_LBR_TOS;
1097 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1098 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1099
1100 /*
1101 * SW branch filter usage:
1102 * - compensate for lack of HW filter
1103 */
1104 }
1105
1106 /* nehalem/westmere */
1107 void __init intel_pmu_lbr_init_nhm(void)
1108 {
1109 x86_pmu.lbr_nr = 16;
1110 x86_pmu.lbr_tos = MSR_LBR_TOS;
1111 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1112 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1113
1114 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1115 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1116
1117 /*
1118 * SW branch filter usage:
1119 * - workaround LBR_SEL errata (see above)
1120 * - support syscall, sysret capture.
1121 * That requires LBR_FAR, but that means far
1122 * jmps need to be filtered out.
1123 */
1124 }
1125
1126 /* sandy bridge */
1127 void __init intel_pmu_lbr_init_snb(void)
1128 {
1129 x86_pmu.lbr_nr = 16;
1130 x86_pmu.lbr_tos = MSR_LBR_TOS;
1131 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1132 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1133
1134 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1135 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1136
1137 /*
1138 * SW branch filter usage:
1139 * - support syscall, sysret capture.
1140 * That requires LBR_FAR, but that means far
1141 * jmps need to be filtered out.
1142 */
1143 }
1144
1145 /* haswell */
1146 void intel_pmu_lbr_init_hsw(void)
1147 {
1148 x86_pmu.lbr_nr = 16;
1149 x86_pmu.lbr_tos = MSR_LBR_TOS;
1150 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1151 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1152
1153 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1154 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1155
1156 if (lbr_from_signext_quirk_needed())
1157 static_branch_enable(&lbr_from_quirk_key);
1158 }
1159
1160 /* skylake */
1161 __init void intel_pmu_lbr_init_skl(void)
1162 {
1163 x86_pmu.lbr_nr = 32;
1164 x86_pmu.lbr_tos = MSR_LBR_TOS;
1165 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1166 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1167
1168 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1169 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1170
1171 /*
1172 * SW branch filter usage:
1173 * - support syscall, sysret capture.
1174 * That requires LBR_FAR, but that means far
1175 * jmps need to be filtered out.
1176 */
1177 }
1178
1179 /* atom */
1180 void __init intel_pmu_lbr_init_atom(void)
1181 {
1182 /*
1183 * Only models starting at stepping 10 seem
1184 * to have an operational LBR which can freeze
1185 * on PMU interrupt.
1186 */
1187 if (boot_cpu_data.x86_model == 28
1188 && boot_cpu_data.x86_mask < 10) {
1189 pr_cont("LBR disabled due to erratum");
1190 return;
1191 }
1192
1193 x86_pmu.lbr_nr = 8;
1194 x86_pmu.lbr_tos = MSR_LBR_TOS;
1195 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1196 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1197
1198 /*
1199 * SW branch filter usage:
1200 * - compensate for lack of HW filter
1201 */
1202 }
1203
1204 /* slm */
1205 void __init intel_pmu_lbr_init_slm(void)
1206 {
1207 x86_pmu.lbr_nr = 8;
1208 x86_pmu.lbr_tos = MSR_LBR_TOS;
1209 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1210 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1211
1212 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1213 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1214
1215 /*
1216 * SW branch filter usage:
1217 * - compensate for lack of HW filter
1218 */
1219 pr_cont("8-deep LBR, ");
1220 }
1221
1222 /* Knights Landing */
1223 void intel_pmu_lbr_init_knl(void)
1224 {
1225 x86_pmu.lbr_nr = 8;
1226 x86_pmu.lbr_tos = MSR_LBR_TOS;
1227 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1228 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1229
1230 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1231 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1232 }