#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "perf_event.h"

enum {
        LBR_FORMAT_32           = 0x00,
        LBR_FORMAT_LIP          = 0x01,
        LBR_FORMAT_EIP          = 0x02,
        LBR_FORMAT_EIP_FLAGS    = 0x03,
        LBR_FORMAT_EIP_FLAGS2   = 0x04,
        LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_EIP_FLAGS2,
};

static enum {
        LBR_EIP_FLAGS           = 1,
        LBR_TSX                 = 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
        [LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
        [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};
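
/*
 * Editor's note: lbr_desc maps the LBR format reported by the CPU (as
 * parsed into x86_pmu.intel_cap.lbr_format by the core perf setup code,
 * outside this file) to the extra flag bits that the format encodes in
 * the top bits of the LBR_FROM value: a mispredict flag and, with
 * FLAGS2, the TSX in-transaction/abort flags consumed in
 * intel_pmu_lbr_read_64() below.
 */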

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT          0 /* do not capture at ring0 */
#define LBR_USER_BIT            1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT             2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT        3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT        4 /* do not capture indirect calls */
#define LBR_RETURN_BIT          5 /* do not capture near returns */
#define LBR_IND_JMP_BIT         6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT         7 /* do not capture relative jumps */
#define LBR_FAR_BIT             8 /* do not capture far branches */

#define LBR_KERNEL      (1 << LBR_KERNEL_BIT)
#define LBR_USER        (1 << LBR_USER_BIT)
#define LBR_JCC         (1 << LBR_JCC_BIT)
#define LBR_REL_CALL    (1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL    (1 << LBR_IND_CALL_BIT)
#define LBR_RETURN      (1 << LBR_RETURN_BIT)
#define LBR_REL_JMP     (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP     (1 << LBR_IND_JMP_BIT)
#define LBR_FAR         (1 << LBR_FAR_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK    0x1ff   /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP    -1      /* LBR filter not supported */
#define LBR_IGN         0       /* ignored */

#define LBR_ANY          \
        (LBR_JCC        |\
         LBR_REL_CALL   |\
         LBR_IND_CALL   |\
         LBR_RETURN     |\
         LBR_REL_JMP    |\
         LBR_IND_JMP    |\
         LBR_FAR)

#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)
#define LBR_FROM_FLAG_IN_TX    (1ULL << 62)
#define LBR_FROM_FLAG_ABORT    (1ULL << 61)

#define for_each_branch_sample_type(x) \
        for ((x) = PERF_SAMPLE_BRANCH_USER; \
             (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
        X86_BR_NONE     = 0,      /* unknown */

        X86_BR_USER     = 1 << 0, /* branch target is user */
        X86_BR_KERNEL   = 1 << 1, /* branch target is kernel */

        X86_BR_CALL     = 1 << 2, /* call */
        X86_BR_RET      = 1 << 3, /* return */
        X86_BR_SYSCALL  = 1 << 4, /* syscall */
        X86_BR_SYSRET   = 1 << 5, /* syscall return */
        X86_BR_INT      = 1 << 6, /* sw interrupt */
        X86_BR_IRET     = 1 << 7, /* return from interrupt */
        X86_BR_JCC      = 1 << 8, /* conditional */
        X86_BR_JMP      = 1 << 9, /* jump */
        X86_BR_IRQ      = 1 << 10,/* hw interrupt or trap or fault */
        X86_BR_IND_CALL = 1 << 11,/* indirect calls */
        X86_BR_ABORT    = 1 << 12,/* transaction abort */
        X86_BR_IN_TX    = 1 << 13,/* in transaction */
        X86_BR_NO_TX    = 1 << 14,/* not in transaction */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
        (X86_BR_CALL    |\
         X86_BR_RET     |\
         X86_BR_SYSCALL |\
         X86_BR_SYSRET  |\
         X86_BR_INT     |\
         X86_BR_IRET    |\
         X86_BR_JCC     |\
         X86_BR_JMP     |\
         X86_BR_IRQ     |\
         X86_BR_ABORT   |\
         X86_BR_IND_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL          \
        (X86_BR_CALL            |\
         X86_BR_IND_CALL        |\
         X86_BR_SYSCALL         |\
         X86_BR_IRQ             |\
         X86_BR_INT)
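
/*
 * Editor's note: the X86_BR_* masks above are the vocabulary of the
 * software branch filter. branch_type() below classifies each recorded
 * branch by decoding the instruction at its source address, and
 * intel_pmu_lbr_filter() then drops entries whose class does not match
 * what the user asked for in attr.branch_sample_type.
 */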

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
 * otherwise it becomes nearly impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(void)
{
        u64 debugctl;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->lbr_sel)
                wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
        u64 debugctl;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++)
                wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                wrmsrl(x86_pmu.lbr_from + i, 0);
                wrmsrl(x86_pmu.lbr_to + i, 0);
        }
}

void intel_pmu_lbr_reset(void)
{
        if (!x86_pmu.lbr_nr)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_reset_32();
        else
                intel_pmu_lbr_reset_64();
}

void intel_pmu_lbr_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        /*
         * Reset the LBR stack if we changed task context to
         * avoid data leaks.
         */
        if (event->ctx->task && cpuc->lbr_context != event->ctx) {
                intel_pmu_lbr_reset();
                cpuc->lbr_context = event->ctx;
        }
        cpuc->br_sel = event->hw.branch_reg.reg;

        cpuc->lbr_users++;
}

void intel_pmu_lbr_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);

        if (cpuc->enabled && !cpuc->lbr_users) {
                __intel_pmu_lbr_disable();
                /* avoid stale pointer */
                cpuc->lbr_context = NULL;
        }
}

void intel_pmu_lbr_enable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_enable();
}

void intel_pmu_lbr_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_disable();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
        u64 tos;

        rdmsrl(x86_pmu.lbr_tos, tos);

        return tos;
}
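
/*
 * Editor's note: the LBR stack is a small ring buffer (4, 8 or 16
 * entries on the CPUs handled in this file). The read routines below
 * start at the TOS and walk backwards in time, masking the index with
 * (lbr_nr - 1), which relies on lbr_nr being a power of two.
 */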

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        u64 tos = intel_pmu_lbr_tos();
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                union {
                        struct {
                                u32 from;
                                u32 to;
                        };
                        u64     lbr;
                } msr_lastbranch;

                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

                cpuc->lbr_entries[i].from      = msr_lastbranch.from;
                cpuc->lbr_entries[i].to        = msr_lastbranch.to;
                cpuc->lbr_entries[i].mispred   = 0;
                cpuc->lbr_entries[i].predicted = 0;
                cpuc->lbr_entries[i].reserved  = 0;
        }
        cpuc->lbr_stack.nr = i;
}

/*
 * Due to the lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        u64 tos = intel_pmu_lbr_tos();
        int i;
        int out = 0;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
                int skip = 0;
                int lbr_flags = lbr_desc[lbr_format];

                rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
                rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

                if (lbr_flags & LBR_EIP_FLAGS) {
                        mis = !!(from & LBR_FROM_FLAG_MISPRED);
                        pred = !mis;
                        skip = 1;
                }
                if (lbr_flags & LBR_TSX) {
                        in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
                        abort = !!(from & LBR_FROM_FLAG_ABORT);
                        skip = 3;
                }
                /* strip the flag bits and sign-extend the 'from' address */
                from = (u64)((((s64)from) << skip) >> skip);

                /*
                 * Some CPUs report duplicated abort records,
                 * with the second entry not having an abort bit set.
                 * Skip them here. This loop runs backwards,
                 * so we need to undo the previous record.
                 * If the abort just happened outside the window
                 * the extra entry cannot be removed.
                 */
                if (abort && x86_pmu.lbr_double_abort && out > 0)
                        out--;

                cpuc->lbr_entries[out].from      = from;
                cpuc->lbr_entries[out].to        = to;
                cpuc->lbr_entries[out].mispred   = mis;
                cpuc->lbr_entries[out].predicted = pred;
                cpuc->lbr_entries[out].in_tx     = in_tx;
                cpuc->lbr_entries[out].abort     = abort;
                cpuc->lbr_entries[out].reserved  = 0;
                out++;
        }
        cpuc->lbr_stack.nr = out;
}

void intel_pmu_lbr_read(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!cpuc->lbr_users)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_read_32(cpuc);
        else
                intel_pmu_lbr_read_64(cpuc);

        intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
        u64 br_type = event->attr.branch_sample_type;
        int mask = 0;

        if (br_type & PERF_SAMPLE_BRANCH_USER)
                mask |= X86_BR_USER;

        if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
                mask |= X86_BR_KERNEL;

        /* we ignore BRANCH_HV here */

        if (br_type & PERF_SAMPLE_BRANCH_ANY)
                mask |= X86_BR_ANY;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
                mask |= X86_BR_ANY_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

        if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
                mask |= X86_BR_IND_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
                mask |= X86_BR_ABORT;

        if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
                mask |= X86_BR_IN_TX;

        if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
                mask |= X86_BR_NO_TX;

        if (br_type & PERF_SAMPLE_BRANCH_COND)
                mask |= X86_BR_JCC;

        /*
         * stash the actual user request into reg; it may
         * be used by fixup code for some CPUs
         */
        event->hw.branch_reg.reg = mask;
}

/*
 * Set up the HW LBR filter.
 * Used only when available; it may not be enough to disambiguate
 * all branches and may need the help of the SW filter.
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        u64 br_type = event->attr.branch_sample_type;
        u64 mask = 0, m;
        u64 v;

        for_each_branch_sample_type(m) {
                if (!(br_type & m))
                        continue;

                v = x86_pmu.lbr_sel_map[m];
                if (v == LBR_NOT_SUPP)
                        return -EOPNOTSUPP;

                if (v != LBR_IGN)
                        mask |= v;
        }
        reg = &event->hw.branch_reg;
        reg->idx = EXTRA_REG_LBR;

        /* LBR_SELECT operates in suppress mode so invert mask */
        reg->config = ~mask & x86_pmu.lbr_sel_mask;

        return 0;
}
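
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): on a CPU with an LBR_SELECT map, a request for user-level
 * conditional branches only, i.e.
 *
 *      attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
 *                                PERF_SAMPLE_BRANCH_COND;
 *
 * accumulates mask = LBR_USER | LBR_JCC in the loop above and programs
 *
 *      reg->config = ~(LBR_USER | LBR_JCC) & LBR_SEL_MASK;
 *
 * i.e. every other suppress bit is set, so the hardware suppresses
 * capture at ring 0 and of every branch type except conditional
 * branches.
 */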

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
        int ret = 0;

        /*
         * no LBR on this PMU
         */
        if (!x86_pmu.lbr_nr)
                return -EOPNOTSUPP;

        /*
         * setup SW LBR filter
         */
        intel_pmu_setup_sw_lbr_filter(event);

        /*
         * setup HW LBR filter, if any
         */
        if (x86_pmu.lbr_sel_map)
                ret = intel_pmu_setup_hw_lbr_filter(event);

        return ret;
}

/*
 * Return the type of control flow change at address "from".
 * The instruction there is not necessarily a branch (e.g., in case of
 * an interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
        struct insn insn;
        void *addr;
        int bytes, size = MAX_INSN_SIZE;
        int ret = X86_BR_NONE;
        int ext, to_plm, from_plm;
        u8 buf[MAX_INSN_SIZE];
        int is64 = 0;

        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
        from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

        /*
         * may be zero if the LBR did not fill up after a reset by the time
         * we get a PMU interrupt
         */
        if (from == 0 || to == 0)
                return X86_BR_NONE;

        if (abort)
                return X86_BR_ABORT | to_plm;

        if (from_plm == X86_BR_USER) {
                /*
                 * can happen if measuring at the user level only
                 * and we interrupt in a kernel thread, e.g., idle.
                 */
                if (!current->mm)
                        return X86_BR_NONE;

                /* may fail if text not present */
                bytes = copy_from_user_nmi(buf, (void __user *)from, size);
                if (bytes != 0)
                        return X86_BR_NONE;

                addr = buf;
        } else {
                /*
                 * The LBR logs any address in the IP, even if the IP just
                 * faulted. This means userspace can control the from address.
                 * Ensure we don't blindly read any address by validating it is
                 * a known text address.
                 */
                if (kernel_text_address(from))
                        addr = (void *)from;
                else
                        return X86_BR_NONE;
        }

        /*
         * decoder needs to know the ABI especially
         * on 64-bit systems running 32-bit apps
         */
#ifdef CONFIG_X86_64
        is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
        insn_init(&insn, addr, is64);
        insn_get_opcode(&insn);

        switch (insn.opcode.bytes[0]) {
        case 0xf:
                switch (insn.opcode.bytes[1]) {
                case 0x05: /* syscall */
                case 0x34: /* sysenter */
                        ret = X86_BR_SYSCALL;
                        break;
                case 0x07: /* sysret */
                case 0x35: /* sysexit */
                        ret = X86_BR_SYSRET;
                        break;
                case 0x80 ... 0x8f: /* conditional */
                        ret = X86_BR_JCC;
                        break;
                default:
                        ret = X86_BR_NONE;
                }
                break;
        case 0x70 ... 0x7f: /* conditional */
                ret = X86_BR_JCC;
                break;
        case 0xc2: /* near ret */
        case 0xc3: /* near ret */
        case 0xca: /* far ret */
        case 0xcb: /* far ret */
                ret = X86_BR_RET;
                break;
        case 0xcf: /* iret */
                ret = X86_BR_IRET;
                break;
        case 0xcc ... 0xce: /* int */
                ret = X86_BR_INT;
                break;
        case 0xe8: /* call near rel */
        case 0x9a: /* call far absolute */
                ret = X86_BR_CALL;
                break;
        case 0xe0 ... 0xe3: /* loop jmp */
                ret = X86_BR_JCC;
                break;
        case 0xe9 ... 0xeb: /* jmp */
                ret = X86_BR_JMP;
                break;
        case 0xff: /* call near absolute, call far absolute ind */
                insn_get_modrm(&insn);
                ext = (insn.modrm.bytes[0] >> 3) & 0x7;
                switch (ext) {
                case 2: /* near ind call */
                case 3: /* far ind call */
                        ret = X86_BR_IND_CALL;
                        break;
                case 4:
                case 5:
                        ret = X86_BR_JMP;
                        break;
                }
                break;
        default:
                ret = X86_BR_NONE;
        }
        /*
         * interrupts, traps, faults (and thus ring transitions) may
         * occur on any instruction. Thus, to classify them correctly,
         * we need to first look at the from and to priv levels. If they
         * are different and to is in the kernel, then it indicates
         * a ring transition. If the from instruction is not a ring
         * transition instr (syscall, sysenter, int), then it means
         * it was an irq, trap or fault.
         *
         * we have no way of detecting kernel to kernel faults.
         */
        if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
            && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
                ret = X86_BR_IRQ;

        /*
         * branch priv level determined by target as
         * is done by HW when LBR_SELECT is implemented
         */
        if (ret != X86_BR_NONE)
                ret |= to_plm;

        return ret;
}
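
/*
 * Illustrative example (editor's addition, not part of the original
 * source): a "syscall" instruction executed in user space with a
 * kernel-space target decodes to X86_BR_SYSCALL above; since the target
 * priv level is OR-ed in at the end, branch_type() returns
 * X86_BR_SYSCALL | X86_BR_KERNEL for that entry.
 */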

/*
 * implement the actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
        u64 from, to;
        int br_sel = cpuc->br_sel;
        int i, j, type;
        bool compress = false;

        /* if sampling all branches, then nothing to filter */
        if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
                return;

        for (i = 0; i < cpuc->lbr_stack.nr; i++) {

                from = cpuc->lbr_entries[i].from;
                to = cpuc->lbr_entries[i].to;

                type = branch_type(from, to, cpuc->lbr_entries[i].abort);
                if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
                        if (cpuc->lbr_entries[i].in_tx)
                                type |= X86_BR_IN_TX;
                        else
                                type |= X86_BR_NO_TX;
                }

                /* if type does not correspond, then discard */
                if (type == X86_BR_NONE || (br_sel & type) != type) {
                        cpuc->lbr_entries[i].from = 0;
                        compress = true;
                }
        }

        if (!compress)
                return;

        /* remove all entries with from=0 */
        for (i = 0; i < cpuc->lbr_stack.nr; ) {
                if (!cpuc->lbr_entries[i].from) {
                        j = i;
                        while (++j < cpuc->lbr_stack.nr)
                                cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
                        cpuc->lbr_stack.nr--;
                        if (!cpuc->lbr_entries[i].from)
                                continue;
                }
                i++;
        }
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
        [PERF_SAMPLE_BRANCH_ANY]        = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER]       = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL]     = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV]         = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
                                        | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
         */
        [PERF_SAMPLE_BRANCH_ANY_CALL] =
         LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
         */
        [PERF_SAMPLE_BRANCH_IND_CALL]   = LBR_IND_CALL | LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_COND]       = LBR_JCC,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
        [PERF_SAMPLE_BRANCH_ANY]        = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER]       = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL]     = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV]         = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL]   = LBR_REL_CALL | LBR_IND_CALL
                                        | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL]   = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND]       = LBR_JCC,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
        x86_pmu.lbr_nr     = 4;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
        pr_cont("4-deep LBR, ");
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
        x86_pmu.lbr_nr     = 16;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - workaround LBR_SEL errata (see above)
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out
         */
        pr_cont("16-deep LBR, ");
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
        x86_pmu.lbr_nr     = 16;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out
         */
        pr_cont("16-deep LBR, ");
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
        /*
         * only models starting at stepping 10 seem
         * to have an operational LBR which can freeze
         * on PMU interrupt
         */
        if (boot_cpu_data.x86_model == 28
            && boot_cpu_data.x86_mask < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }

        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
        pr_cont("8-deep LBR, ");
}
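
/*
 * Editor's note (context, not part of the original file): these __init
 * helpers are expected to be called from the model-specific PMU setup
 * (intel_pmu_init() in perf_event_intel.c in this tree), which selects
 * one of them based on the detected CPU model before the PMU is
 * registered.
 */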