/* arch/x86/events/intel/lbr.c */

#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

static const enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
	(X86_BR_CALL    |\
	 X86_BR_RET     |\
	 X86_BR_SYSCALL |\
	 X86_BR_SYSRET  |\
	 X86_BR_INT     |\
	 X86_BR_IRET    |\
	 X86_BR_JCC     |\
	 X86_BR_JMP     |\
	 X86_BR_IRQ     |\
	 X86_BR_ABORT   |\
	 X86_BR_IND_CALL |\
	 X86_BR_IND_JMP  |\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
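
/*
 * Illustrative note (not in the upstream file): per asm/msr-index.h,
 * DEBUGCTLMSR_LBR is bit 0 and DEBUGCTLMSR_FREEZE_LBRS_ON_PMI is bit 11
 * of MSR_IA32_DEBUGCTLMSR, so a non-callstack enable above typically
 * ORs in 0x801, and the disable path clears exactly those two bits.
 */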

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}

enum {
	LBR_NONE,
	LBR_VALID,
};

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *     part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61 bits sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);

/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
		 * in val are always OFF and must be changed to be sign
		 * extension bits. Since bits 59:60 are guaranteed to be
		 * part of the sign extension bits, we can just copy them
		 * to 61:62.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}
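
/*
 * Worked example (illustrative, not part of the upstream file): with the
 * quirk enabled, a stored kernel from-address such as 0x1fff800012345678
 * (TSX bits 61:62 clear, sign-extension bits 59:60 set) has bits 59:60
 * copied into 61:62 by the statement above, giving 0x7fff800012345678 --
 * the 63-bit sign extension that wrmsr() expects.
 */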

/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
static u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}

static inline void wrlbr_from(unsigned int idx, u64 val)
{
	val = lbr_from_signext_quirk_wr(val);
	wrmsrl(x86_pmu.lbr_from + idx, val);
}

static inline void wrlbr_to(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_to + idx, val);
}

static inline u64 rdlbr_from(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_from + idx, val);

	return lbr_from_signext_quirk_rd(val);
}

static inline u64 rdlbr_to(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_to + idx, val);

	return val;
}

static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);

		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}
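
/*
 * Illustrative example (not in the upstream file): with x86_pmu.lbr_nr
 * == 16 (mask == 0xf) and a saved task_ctx->tos of 3, the loop above
 * writes saved records i = 0, 1, 2 back into LBR slots 3, 2, 1 and then
 * restores MSR_LBR_TOS to 3; the "& mask" keeps the slot index within
 * the 16-entry ring.
 */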

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	unsigned lbr_idx, mask;
	u64 tos;
	int i;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!cpuc->lbr_users)
		return;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in)
			__intel_pmu_lbr_restore(task_ctx);
		else
			__intel_pmu_lbr_save(task_ctx);
		return;
	}

	/*
	 * Since a context switch can flip the address space and LBR entries
	 * are not tagged with an identifier, we need to wipe the LBR, even for
	 * per-cpu events. You simply cannot resolve the branches from the old
	 * address space.
	 */
	if (sched_in)
		intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	/*
	 * Request pmu::sched_task() callback, which will fire inside the
	 * regular perf event scheduling, so that call will:
	 *
	 *  - restore or wipe; when LBR-callstack,
	 *  - wipe; otherwise,
	 *
	 * when this is from __perf_event_task_sched_in().
	 *
	 * However, if this is from perf_install_in_context(), no such callback
	 * will follow and we'll need to reset the LBR here if this is the
	 * first LBR event.
	 *
	 * The problem is, we cannot tell these cases apart... but we can
	 * exclude the biggest chunk of cases by looking at
	 * event->total_time_running. An event that has accrued runtime cannot
	 * be 'new'. Conversely, a new event can get installed through the
	 * context switch path for the first time.
	 */
	perf_sched_cb_inc(event->ctx->pmu);
	if (!cpuc->lbr_users++ && !event->total_time_running)
		intel_pmu_lbr_reset();
}

void intel_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) &&
	    event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].in_tx	= 0;
		cpuc->lbr_entries[i].abort	= 0;
		cpuc->lbr_entries[i].cycles	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}
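
/*
 * Illustrative example (not in the upstream file): in the 32-bit LBR
 * format a single MSR packs both addresses, and the anonymous union
 * above splits it on little-endian x86; e.g. a raw value of
 * 0xc1000010c1000200 yields from == 0xc1000200 (low half) and
 * to == 0xc1000010 (high half).
 */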

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			num = tos;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		from = rdlbr_from(lbr_idx);
		to   = rdlbr_to(lbr_idx);

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}
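
/*
 * Illustrative example (not in the upstream file): for the LBR_TSX
 * layout (skip == 3), a captured from value of 0x5fffffff81000000 --
 * in_tx flag in bit 62 set, abort clear, bit 60 already part of the
 * kernel-address sign extension -- becomes 0xffffffff81000000 after
 * ((s64)from << 3) >> 3, which drops the flag bits and re-extends the
 * sign from bit 60.
 */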

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}
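
/*
 * Illustrative example (not in the upstream file): a request of
 * PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY_RETURN produces
 * mask == X86_BR_USER | X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET,
 * so only user-level returns survive the SW filter pass.
 */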

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}
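
/*
 * Illustrative example (not in the upstream file): with lbr_sel_mask ==
 * LBR_SEL_MASK (0x3ff), a request that maps to mask == LBR_USER |
 * LBR_JCC (0x006) yields reg->config = 0x006 ^ 0x1ff = 0x1f9: every
 * suppress bit set except the two classes being kept, since LBR_SELECT
 * bits 0-8 suppress what they name rather than enable it.
 */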

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}
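
/*
 * Illustrative example (not in the upstream file): a near-ret opcode
 * (0xc3) fetched from a user address, branching to another user
 * address, decodes to ret == X86_BR_RET and is returned as
 * X86_BR_RET | X86_BR_USER once the target privilege level is ORed in.
 */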

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}
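
/*
 * Illustrative example (not in the upstream file): if the stack holds
 * three entries {A, B, C} and B fails the filter, B's from address is
 * zeroed in the first pass; the compaction pass then shifts C down,
 * leaving {A, C} with cpuc->lbr_stack.nr == 2.
 */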

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
}