arch/x86/events/intel/ds.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/bitops.h>
3 #include <linux/types.h>
4 #include <linux/slab.h>
5
6 #include <asm/cpu_entry_area.h>
7 #include <asm/perf_event.h>
8 #include <asm/tlbflush.h>
9 #include <asm/insn.h>
10
11 #include "../perf_event.h"
12
13 /* Waste a full page so it can be mapped into the cpu_entry_area */
14 DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
15
16 /* The size of a BTS record in bytes: */
17 #define BTS_RECORD_SIZE 24
18
19 #define PEBS_FIXUP_SIZE PAGE_SIZE
20
21 /*
22 * pebs_record_32 for p4 and core not supported
23
24 struct pebs_record_32 {
25 u32 flags, ip;
26 u32 ax, bx, cx, dx;
27 u32 si, di, bp, sp;
28 };
29
30 */
31
32 union intel_x86_pebs_dse {
33 u64 val;
34 struct {
35 unsigned int ld_dse:4;
36 unsigned int ld_stlb_miss:1;
37 unsigned int ld_locked:1;
38 unsigned int ld_reserved:26;
39 };
40 struct {
41 unsigned int st_l1d_hit:1;
42 unsigned int st_reserved1:3;
43 unsigned int st_stlb_miss:1;
44 unsigned int st_locked:1;
45 unsigned int st_reserved2:26;
46 };
47 };
48
49
50 /*
51 * Map PEBS Load Latency Data Source encodings to generic
52 * memory data source information
53 */
54 #define P(a, b) PERF_MEM_S(a, b)
55 #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
56 #define LEVEL(x) P(LVLNUM, x)
57 #define REM P(REMOTE, REMOTE)
58 #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
59
60 /* Version for Sandy Bridge and later */
61 static u64 pebs_data_source[] = {
62 P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* 0x00: unknown L3 */
63 OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */
64 OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
65 OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
66 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
67 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
68 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
69 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
70 OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
71 OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
72 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
73 OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
74 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* 0x0c: L3 miss, excl */
75 OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
76 OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
77 OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
78 };
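/*
 * Example (illustrative): a raw PEBS load DSE status of 0x23 splits into
 * ld_dse = 0x3, ld_stlb_miss = 0 and ld_locked = 1, so load_latency_data()
 * below picks pebs_data_source[0x3] (L2 hit) and, on models that report
 * TLB/lock info, ORs in the TLB-hit and locked-access bits of the generic
 * data source encoding.
 */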
79
80 /* Patch up minor differences in the bits */
81 void __init intel_pmu_pebs_data_source_nhm(void)
82 {
83 pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
84 pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
85 pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
86 }
87
88 void __init intel_pmu_pebs_data_source_skl(bool pmem)
89 {
90 u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);
91
92 pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
93 pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
94 pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
95 pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
96 pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
97 }
98
99 static u64 precise_store_data(u64 status)
100 {
101 union intel_x86_pebs_dse dse;
102 u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
103
104 dse.val = status;
105
106 /*
107 * bit 4: TLB access
108 * 1 = store missed the 2nd level TLB
109 *
110 * so the access either hit the page walker or the OS;
111 * otherwise it hit the 2nd level TLB
112 */
113 if (dse.st_stlb_miss)
114 val |= P(TLB, MISS);
115 else
116 val |= P(TLB, HIT);
117
118 /*
119 * bit 0: hit L1 data cache
120 * if not set, then all we know is that
121 * it missed L1D
122 */
123 if (dse.st_l1d_hit)
124 val |= P(LVL, HIT);
125 else
126 val |= P(LVL, MISS);
127
128 /*
129 * bit 5: Locked prefix
130 */
131 if (dse.st_locked)
132 val |= P(LOCK, LOCKED);
133
134 return val;
135 }
136
137 static u64 precise_datala_hsw(struct perf_event *event, u64 status)
138 {
139 union perf_mem_data_src dse;
140
141 dse.val = PERF_MEM_NA;
142
143 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
144 dse.mem_op = PERF_MEM_OP_STORE;
145 else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
146 dse.mem_op = PERF_MEM_OP_LOAD;
147
148 /*
149 * L1 info only valid for following events:
150 *
151 * MEM_UOPS_RETIRED.STLB_MISS_STORES
152 * MEM_UOPS_RETIRED.LOCK_STORES
153 * MEM_UOPS_RETIRED.SPLIT_STORES
154 * MEM_UOPS_RETIRED.ALL_STORES
155 */
156 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
157 if (status & 1)
158 dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
159 else
160 dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
161 }
162 return dse.val;
163 }
164
165 static u64 load_latency_data(u64 status)
166 {
167 union intel_x86_pebs_dse dse;
168 u64 val;
169
170 dse.val = status;
171
172 /*
173 * use the mapping table for bit 0-3
174 */
175 val = pebs_data_source[dse.ld_dse];
176
177 /*
178 * Nehalem models do not support TLB, Lock infos
179 */
180 if (x86_pmu.pebs_no_tlb) {
181 val |= P(TLB, NA) | P(LOCK, NA);
182 return val;
183 }
184 /*
185 * bit 4: TLB access
186 * 0 = did not miss 2nd level TLB
187 * 1 = missed 2nd level TLB
188 */
189 if (dse.ld_stlb_miss)
190 val |= P(TLB, MISS) | P(TLB, L2);
191 else
192 val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
193
194 /*
195 * bit 5: locked prefix
196 */
197 if (dse.ld_locked)
198 val |= P(LOCK, LOCKED);
199
200 return val;
201 }
202
203 struct pebs_record_core {
204 u64 flags, ip;
205 u64 ax, bx, cx, dx;
206 u64 si, di, bp, sp;
207 u64 r8, r9, r10, r11;
208 u64 r12, r13, r14, r15;
209 };
210
211 struct pebs_record_nhm {
212 u64 flags, ip;
213 u64 ax, bx, cx, dx;
214 u64 si, di, bp, sp;
215 u64 r8, r9, r10, r11;
216 u64 r12, r13, r14, r15;
217 u64 status, dla, dse, lat;
218 };
219
220 /*
221 * Same as pebs_record_nhm, with two additional fields.
222 */
223 struct pebs_record_hsw {
224 u64 flags, ip;
225 u64 ax, bx, cx, dx;
226 u64 si, di, bp, sp;
227 u64 r8, r9, r10, r11;
228 u64 r12, r13, r14, r15;
229 u64 status, dla, dse, lat;
230 u64 real_ip, tsx_tuning;
231 };
232
233 union hsw_tsx_tuning {
234 struct {
235 u32 cycles_last_block : 32,
236 hle_abort : 1,
237 rtm_abort : 1,
238 instruction_abort : 1,
239 non_instruction_abort : 1,
240 retry : 1,
241 data_conflict : 1,
242 capacity_writes : 1,
243 capacity_reads : 1;
244 };
245 u64 value;
246 };
247
248 #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL
249
250 /* Same as HSW, plus TSC */
251
252 struct pebs_record_skl {
253 u64 flags, ip;
254 u64 ax, bx, cx, dx;
255 u64 si, di, bp, sp;
256 u64 r8, r9, r10, r11;
257 u64 r12, r13, r14, r15;
258 u64 status, dla, dse, lat;
259 u64 real_ip, tsx_tuning;
260 u64 tsc;
261 };
262
263 void init_debug_store_on_cpu(int cpu)
264 {
265 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
266
267 if (!ds)
268 return;
269
270 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
271 (u32)((u64)(unsigned long)ds),
272 (u32)((u64)(unsigned long)ds >> 32));
273 }
274
275 void fini_debug_store_on_cpu(int cpu)
276 {
277 if (!per_cpu(cpu_hw_events, cpu).ds)
278 return;
279
280 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
281 }
282
283 static DEFINE_PER_CPU(void *, insn_buffer);
284
285 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
286 {
287 unsigned long start = (unsigned long)cea;
288 phys_addr_t pa;
289 size_t msz = 0;
290
291 pa = virt_to_phys(addr);
292
293 preempt_disable();
294 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
295 cea_set_pte(cea, pa, prot);
296
297 /*
298 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
299 * all TLB entries for it.
300 */
301 flush_tlb_kernel_range(start, start + size);
302 preempt_enable();
303 }
304
305 static void ds_clear_cea(void *cea, size_t size)
306 {
307 unsigned long start = (unsigned long)cea;
308 size_t msz = 0;
309
310 preempt_disable();
311 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
312 cea_set_pte(cea, 0, PAGE_NONE);
313
314 flush_tlb_kernel_range(start, start + size);
315 preempt_enable();
316 }
317
318 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
319 {
320 unsigned int order = get_order(size);
321 int node = cpu_to_node(cpu);
322 struct page *page;
323
324 page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
325 return page ? page_address(page) : NULL;
326 }
327
328 static void dsfree_pages(const void *buffer, size_t size)
329 {
330 if (buffer)
331 free_pages((unsigned long)buffer, get_order(size));
332 }
333
334 static int alloc_pebs_buffer(int cpu)
335 {
336 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
337 struct debug_store *ds = hwev->ds;
338 size_t bsiz = x86_pmu.pebs_buffer_size;
339 int max, node = cpu_to_node(cpu);
340 void *buffer, *ibuffer, *cea;
341
342 if (!x86_pmu.pebs)
343 return 0;
344
345 buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
346 if (unlikely(!buffer))
347 return -ENOMEM;
348
349 /*
350 * HSW+ already provides us the eventing ip; no need to allocate this
351 * buffer then.
352 */
353 if (x86_pmu.intel_cap.pebs_format < 2) {
354 ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
355 if (!ibuffer) {
356 dsfree_pages(buffer, bsiz);
357 return -ENOMEM;
358 }
359 per_cpu(insn_buffer, cpu) = ibuffer;
360 }
361 hwev->ds_pebs_vaddr = buffer;
362 /* Update the cpu entry area mapping */
363 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
364 ds->pebs_buffer_base = (unsigned long) cea;
365 ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
366 ds->pebs_index = ds->pebs_buffer_base;
367 max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
368 ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
369 return 0;
370 }
371
372 static void release_pebs_buffer(int cpu)
373 {
374 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
375 void *cea;
376
377 if (!x86_pmu.pebs)
378 return;
379
380 kfree(per_cpu(insn_buffer, cpu));
381 per_cpu(insn_buffer, cpu) = NULL;
382
383 /* Clear the fixmap */
384 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
385 ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
386 dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
387 hwev->ds_pebs_vaddr = NULL;
388 }
389
390 static int alloc_bts_buffer(int cpu)
391 {
392 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
393 struct debug_store *ds = hwev->ds;
394 void *buffer, *cea;
395 int max;
396
397 if (!x86_pmu.bts)
398 return 0;
399
400 buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
401 if (unlikely(!buffer)) {
402 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
403 return -ENOMEM;
404 }
405 hwev->ds_bts_vaddr = buffer;
406 /* Update the fixmap */
407 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
408 ds->bts_buffer_base = (unsigned long) cea;
409 ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
410 ds->bts_index = ds->bts_buffer_base;
411 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
412 ds->bts_absolute_maximum = ds->bts_buffer_base +
413 max * BTS_RECORD_SIZE;
414 ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
415 (max / 16) * BTS_RECORD_SIZE;
416 return 0;
417 }
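/*
 * Worked example (assuming BTS_BUFFER_SIZE is 16 pages, i.e. 64KiB):
 * max = 65536 / 24 = 2730 records, so the interrupt threshold ends up
 * 2730 / 16 = 170 records (~4KiB) below the absolute maximum, leaving
 * headroom to drain the buffer before it overflows.
 */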
418
419 static void release_bts_buffer(int cpu)
420 {
421 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
422 void *cea;
423
424 if (!x86_pmu.bts)
425 return;
426
427 /* Clear the fixmap */
428 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
429 ds_clear_cea(cea, BTS_BUFFER_SIZE);
430 dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
431 hwev->ds_bts_vaddr = NULL;
432 }
433
434 static int alloc_ds_buffer(int cpu)
435 {
436 struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
437
438 memset(ds, 0, sizeof(*ds));
439 per_cpu(cpu_hw_events, cpu).ds = ds;
440 return 0;
441 }
442
443 static void release_ds_buffer(int cpu)
444 {
445 per_cpu(cpu_hw_events, cpu).ds = NULL;
446 }
447
448 void release_ds_buffers(void)
449 {
450 int cpu;
451
452 if (!x86_pmu.bts && !x86_pmu.pebs)
453 return;
454
455 for_each_possible_cpu(cpu)
456 release_ds_buffer(cpu);
457
458 for_each_possible_cpu(cpu) {
459 /*
460 * Again, ignore errors from offline CPUs; they will no longer
461 * observe cpu_hw_events.ds and will not program the DS_AREA when
462 * they come up.
463 */
464 fini_debug_store_on_cpu(cpu);
465 }
466
467 for_each_possible_cpu(cpu) {
468 release_pebs_buffer(cpu);
469 release_bts_buffer(cpu);
470 }
471 }
472
473 void reserve_ds_buffers(void)
474 {
475 int bts_err = 0, pebs_err = 0;
476 int cpu;
477
478 x86_pmu.bts_active = 0;
479 x86_pmu.pebs_active = 0;
480
481 if (!x86_pmu.bts && !x86_pmu.pebs)
482 return;
483
484 if (!x86_pmu.bts)
485 bts_err = 1;
486
487 if (!x86_pmu.pebs)
488 pebs_err = 1;
489
490 for_each_possible_cpu(cpu) {
491 if (alloc_ds_buffer(cpu)) {
492 bts_err = 1;
493 pebs_err = 1;
494 }
495
496 if (!bts_err && alloc_bts_buffer(cpu))
497 bts_err = 1;
498
499 if (!pebs_err && alloc_pebs_buffer(cpu))
500 pebs_err = 1;
501
502 if (bts_err && pebs_err)
503 break;
504 }
505
506 if (bts_err) {
507 for_each_possible_cpu(cpu)
508 release_bts_buffer(cpu);
509 }
510
511 if (pebs_err) {
512 for_each_possible_cpu(cpu)
513 release_pebs_buffer(cpu);
514 }
515
516 if (bts_err && pebs_err) {
517 for_each_possible_cpu(cpu)
518 release_ds_buffer(cpu);
519 } else {
520 if (x86_pmu.bts && !bts_err)
521 x86_pmu.bts_active = 1;
522
523 if (x86_pmu.pebs && !pebs_err)
524 x86_pmu.pebs_active = 1;
525
526 for_each_possible_cpu(cpu) {
527 /*
528 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
529 * will get this call through intel_pmu_cpu_starting().
530 */
531 init_debug_store_on_cpu(cpu);
532 }
533 }
534 }
535
536 /*
537 * BTS
538 */
539
540 struct event_constraint bts_constraint =
541 EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
542
543 void intel_pmu_enable_bts(u64 config)
544 {
545 unsigned long debugctlmsr;
546
547 debugctlmsr = get_debugctlmsr();
548
549 debugctlmsr |= DEBUGCTLMSR_TR;
550 debugctlmsr |= DEBUGCTLMSR_BTS;
551 if (config & ARCH_PERFMON_EVENTSEL_INT)
552 debugctlmsr |= DEBUGCTLMSR_BTINT;
553
554 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
555 debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
556
557 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
558 debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
559
560 update_debugctlmsr(debugctlmsr);
561 }
562
563 void intel_pmu_disable_bts(void)
564 {
565 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
566 unsigned long debugctlmsr;
567
568 if (!cpuc->ds)
569 return;
570
571 debugctlmsr = get_debugctlmsr();
572
573 debugctlmsr &=
574 ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
575 DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
576
577 update_debugctlmsr(debugctlmsr);
578 }
579
580 int intel_pmu_drain_bts_buffer(void)
581 {
582 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
583 struct debug_store *ds = cpuc->ds;
584 struct bts_record {
585 u64 from;
586 u64 to;
587 u64 flags;
588 };
589 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
590 struct bts_record *at, *base, *top;
591 struct perf_output_handle handle;
592 struct perf_event_header header;
593 struct perf_sample_data data;
594 unsigned long skip = 0;
595 struct pt_regs regs;
596
597 if (!event)
598 return 0;
599
600 if (!x86_pmu.bts_active)
601 return 0;
602
603 base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
604 top = (struct bts_record *)(unsigned long)ds->bts_index;
605
606 if (top <= base)
607 return 0;
608
609 memset(&regs, 0, sizeof(regs));
610
611 ds->bts_index = ds->bts_buffer_base;
612
613 perf_sample_data_init(&data, 0, event->hw.last_period);
614
615 /*
616 * BTS leaks kernel addresses in branches across the cpl boundary,
617 * such as traps or system calls, so unless the user is asking for
618 * kernel tracing (and right now it's not possible), we'd need to
619 * filter them out. But first we need to count how many of those we
620 * have in the current batch. This is an extra O(n) pass; however,
621 * it's much faster than the other one, especially considering that
622 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
623 * alloc_bts_buffer()).
624 */
625 for (at = base; at < top; at++) {
626 /*
627 * Note that right now *this* BTS code only works if
628 * attr::exclude_kernel is set, but let's keep this extra
629 * check here in case that changes.
630 */
631 if (event->attr.exclude_kernel &&
632 (kernel_ip(at->from) || kernel_ip(at->to)))
633 skip++;
634 }
635
636 /*
637 * Prepare a generic sample, i.e. fill in the invariant fields.
638 * We will overwrite the from and to address before we output
639 * the sample.
640 */
641 rcu_read_lock();
642 perf_prepare_sample(&header, &data, event, &regs);
643
644 if (perf_output_begin(&handle, event, header.size *
645 (top - base - skip)))
646 goto unlock;
647
648 for (at = base; at < top; at++) {
649 /* Filter out any records that contain kernel addresses. */
650 if (event->attr.exclude_kernel &&
651 (kernel_ip(at->from) || kernel_ip(at->to)))
652 continue;
653
654 data.ip = at->from;
655 data.addr = at->to;
656
657 perf_output_sample(&handle, &header, &data, event);
658 }
659
660 perf_output_end(&handle);
661
662 /* There's new data available. */
663 event->hw.interrupts++;
664 event->pending_kill = POLL_IN;
665 unlock:
666 rcu_read_unlock();
667 return 1;
668 }
669
670 static inline void intel_pmu_drain_pebs_buffer(void)
671 {
672 struct pt_regs regs;
673
674 x86_pmu.drain_pebs(&regs);
675 }
676
677 /*
678 * PEBS
679 */
680 struct event_constraint intel_core2_pebs_event_constraints[] = {
681 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
682 INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
683 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
684 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
685 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
686 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
687 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
688 EVENT_CONSTRAINT_END
689 };
690
691 struct event_constraint intel_atom_pebs_event_constraints[] = {
692 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
693 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
694 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
695 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
696 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
697 /* Allow all events as PEBS with no flags */
698 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
699 EVENT_CONSTRAINT_END
700 };
701
702 struct event_constraint intel_slm_pebs_event_constraints[] = {
703 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
704 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
705 /* Allow all events as PEBS with no flags */
706 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
707 EVENT_CONSTRAINT_END
708 };
709
710 struct event_constraint intel_glm_pebs_event_constraints[] = {
711 /* Allow all events as PEBS with no flags */
712 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
713 EVENT_CONSTRAINT_END
714 };
715
716 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
717 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
718 INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
719 INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
720 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
721 INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
722 INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
723 INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
724 INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
725 INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
726 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
727 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
728 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
729 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
730 EVENT_CONSTRAINT_END
731 };
732
733 struct event_constraint intel_westmere_pebs_event_constraints[] = {
734 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
735 INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
736 INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
737 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
738 INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
739 INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
740 INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
741 INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
742 INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
743 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
744 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
745 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
746 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
747 EVENT_CONSTRAINT_END
748 };
749
750 struct event_constraint intel_snb_pebs_event_constraints[] = {
751 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
752 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
753 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
754 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
755 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
756 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
757 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
758 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
759 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
760 /* Allow all events as PEBS with no flags */
761 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
762 EVENT_CONSTRAINT_END
763 };
764
765 struct event_constraint intel_ivb_pebs_event_constraints[] = {
766 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
767 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
768 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
769 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
770 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
771 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
772 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
773 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
774 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
775 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
776 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
777 /* Allow all events as PEBS with no flags */
778 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
779 EVENT_CONSTRAINT_END
780 };
781
782 struct event_constraint intel_hsw_pebs_event_constraints[] = {
783 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
784 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
785 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
786 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
787 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
788 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
789 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
790 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
791 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
792 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
793 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
794 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
795 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
796 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
797 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
798 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
799 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
800 /* Allow all events as PEBS with no flags */
801 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
802 EVENT_CONSTRAINT_END
803 };
804
805 struct event_constraint intel_bdw_pebs_event_constraints[] = {
806 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
807 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
808 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
809 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
810 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
811 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
812 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
813 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
814 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
815 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
816 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
817 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
818 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
819 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
820 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
821 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
822 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
823 /* Allow all events as PEBS with no flags */
824 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
825 EVENT_CONSTRAINT_END
826 };
827
828
829 struct event_constraint intel_skl_pebs_event_constraints[] = {
830 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
831 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
832 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
833 /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
834 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
835 INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
836 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
837 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
838 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
839 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
840 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
841 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
842 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
843 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
844 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
845 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
846 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */
847 /* Allow all events as PEBS with no flags */
848 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
849 EVENT_CONSTRAINT_END
850 };
851
852 struct event_constraint intel_icl_pebs_event_constraints[] = {
853 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
854 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
855
856 INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
857 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
858 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */
859
860 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
861
862 INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
863
864 /*
865 * Everything else is handled by PMU_FL_PEBS_ALL, because we
866 * need the full constraints from the main table.
867 */
868
869 EVENT_CONSTRAINT_END
870 };
871
872 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
873 {
874 struct event_constraint *c;
875
876 if (!event->attr.precise_ip)
877 return NULL;
878
879 if (x86_pmu.pebs_constraints) {
880 for_each_event_constraint(c, x86_pmu.pebs_constraints) {
881 if (constraint_match(c, event->hw.config)) {
882 event->hw.flags |= c->flags;
883 return c;
884 }
885 }
886 }
887
888 /*
889 * Extended PEBS support
890 * Makes the PEBS code search the normal constraints.
891 */
892 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
893 return NULL;
894
895 return &emptyconstraint;
896 }
897
898 /*
899 * We need the sched_task callback even for per-cpu events when we use
900 * the large interrupt threshold, such that we can provide PID and TID
901 * to PEBS samples.
902 */
903 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
904 {
905 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
906 }
907
908 void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
909 {
910 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
911
912 if (!sched_in && pebs_needs_sched_cb(cpuc))
913 intel_pmu_drain_pebs_buffer();
914 }
915
916 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
917 {
918 struct debug_store *ds = cpuc->ds;
919 u64 threshold;
920 int reserved;
921
922 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
923 reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
924 else
925 reserved = x86_pmu.max_pebs_events;
926
927 if (cpuc->n_pebs == cpuc->n_large_pebs) {
928 threshold = ds->pebs_absolute_maximum -
929 reserved * cpuc->pebs_record_size;
930 } else {
931 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
932 }
933
934 ds->pebs_interrupt_threshold = threshold;
935 }
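/*
 * For example: with only single-record PEBS events the threshold sits one
 * record past the buffer base, so a PMI is raised after every record;
 * when every event uses large PEBS, the threshold moves up to 'reserved'
 * records below the end of the buffer and records are batched until then.
 */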
936
937 static void adaptive_pebs_record_size_update(void)
938 {
939 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
940 u64 pebs_data_cfg = cpuc->pebs_data_cfg;
941 int sz = sizeof(struct pebs_basic);
942
943 if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
944 sz += sizeof(struct pebs_meminfo);
945 if (pebs_data_cfg & PEBS_DATACFG_GP)
946 sz += sizeof(struct pebs_gprs);
947 if (pebs_data_cfg & PEBS_DATACFG_XMMS)
948 sz += sizeof(struct pebs_xmm);
949 if (pebs_data_cfg & PEBS_DATACFG_LBRS)
950 sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
951
952 cpuc->pebs_record_size = sz;
953 }
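/*
 * The optional groups are laid out in the record in this same order
 * (basic, meminfo, gprs, xmm, lbr entries);
 * setup_pebs_adaptive_sample_data() below walks them in that order via
 * next_record.
 */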
954
955 #define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
956 PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
957 PERF_SAMPLE_TRANSACTION)
958
959 static u64 pebs_update_adaptive_cfg(struct perf_event *event)
960 {
961 struct perf_event_attr *attr = &event->attr;
962 u64 sample_type = attr->sample_type;
963 u64 pebs_data_cfg = 0;
964 bool gprs, tsx_weight;
965
966 if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
967 attr->precise_ip > 1)
968 return pebs_data_cfg;
969
970 if (sample_type & PERF_PEBS_MEMINFO_TYPE)
971 pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
972
973 /*
974 * We need GPRs when:
975 * + user requested them
976 * + precise_ip < 2 for the non event IP
977 * + For RTM TSX weight we need GPRs for the abort code.
978 */
979 gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
980 (attr->sample_regs_intr & PEBS_GP_REGS);
981
982 tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
983 ((attr->config & INTEL_ARCH_EVENT_MASK) ==
984 x86_pmu.rtm_abort_event);
985
986 if (gprs || (attr->precise_ip < 2) || tsx_weight)
987 pebs_data_cfg |= PEBS_DATACFG_GP;
988
989 if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
990 (attr->sample_regs_intr & PEBS_XMM_REGS))
991 pebs_data_cfg |= PEBS_DATACFG_XMMS;
992
993 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
994 /*
995 * For now always log all LBRs. Could configure this
996 * later.
997 */
998 pebs_data_cfg |= PEBS_DATACFG_LBRS |
999 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
1000 }
1001
1002 return pebs_data_cfg;
1003 }
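/*
 * Example (illustrative): an event with sample_type = PERF_SAMPLE_ADDR |
 * PERF_SAMPLE_DATA_SRC and precise_ip = 1 ends up with
 * PEBS_DATACFG_MEMINFO (for the address/data source) plus PEBS_DATACFG_GP,
 * because precise_ip < 2 needs the non-eventing IP from the GP group.
 */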
1004
1005 static void
1006 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
1007 struct perf_event *event, bool add)
1008 {
1009 struct pmu *pmu = event->ctx->pmu;
1010 /*
1011 * Make sure we get updated with the first PEBS
1012 * event. It will also trigger during removal, but
1013 * that does not hurt:
1014 */
1015 bool update = cpuc->n_pebs == 1;
1016
1017 if (needed_cb != pebs_needs_sched_cb(cpuc)) {
1018 if (!needed_cb)
1019 perf_sched_cb_inc(pmu);
1020 else
1021 perf_sched_cb_dec(pmu);
1022
1023 update = true;
1024 }
1025
1026 /*
1027 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
1028 * iterating all remaining PEBS events to reconstruct the config.
1029 */
1030 if (x86_pmu.intel_cap.pebs_baseline && add) {
1031 u64 pebs_data_cfg;
1032
1033 /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
1034 if (cpuc->n_pebs == 1) {
1035 cpuc->pebs_data_cfg = 0;
1036 cpuc->pebs_record_size = sizeof(struct pebs_basic);
1037 }
1038
1039 pebs_data_cfg = pebs_update_adaptive_cfg(event);
1040
1041 /* Update pebs_record_size if new event requires more data. */
1042 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
1043 cpuc->pebs_data_cfg |= pebs_data_cfg;
1044 adaptive_pebs_record_size_update();
1045 update = true;
1046 }
1047 }
1048
1049 if (update)
1050 pebs_update_threshold(cpuc);
1051 }
1052
1053 void intel_pmu_pebs_add(struct perf_event *event)
1054 {
1055 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1056 struct hw_perf_event *hwc = &event->hw;
1057 bool needed_cb = pebs_needs_sched_cb(cpuc);
1058
1059 cpuc->n_pebs++;
1060 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
1061 cpuc->n_large_pebs++;
1062
1063 pebs_update_state(needed_cb, cpuc, event, true);
1064 }
1065
1066 void intel_pmu_pebs_enable(struct perf_event *event)
1067 {
1068 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1069 struct hw_perf_event *hwc = &event->hw;
1070 struct debug_store *ds = cpuc->ds;
1071
1072 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
1073
1074 cpuc->pebs_enabled |= 1ULL << hwc->idx;
1075
1076 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
1077 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
1078 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1079 cpuc->pebs_enabled |= 1ULL << 63;
1080
1081 if (x86_pmu.intel_cap.pebs_baseline) {
1082 hwc->config |= ICL_EVENTSEL_ADAPTIVE;
1083 if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
1084 wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
1085 cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
1086 }
1087 }
1088
1089 /*
1090 * Use auto-reload if possible to save an MSR write in the PMI.
1091 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD can change the sample period.
1092 */
1093 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1094 unsigned int idx = hwc->idx;
1095
1096 if (idx >= INTEL_PMC_IDX_FIXED)
1097 idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
1098 ds->pebs_event_reset[idx] =
1099 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
1100 } else {
1101 ds->pebs_event_reset[hwc->idx] = 0;
1102 }
1103 }
1104
1105 void intel_pmu_pebs_del(struct perf_event *event)
1106 {
1107 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1108 struct hw_perf_event *hwc = &event->hw;
1109 bool needed_cb = pebs_needs_sched_cb(cpuc);
1110
1111 cpuc->n_pebs--;
1112 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
1113 cpuc->n_large_pebs--;
1114
1115 pebs_update_state(needed_cb, cpuc, event, false);
1116 }
1117
1118 void intel_pmu_pebs_disable(struct perf_event *event)
1119 {
1120 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1121 struct hw_perf_event *hwc = &event->hw;
1122
1123 if (cpuc->n_pebs == cpuc->n_large_pebs)
1124 intel_pmu_drain_pebs_buffer();
1125
1126 cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
1127
1128 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
1129 (x86_pmu.version < 5))
1130 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
1131 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1132 cpuc->pebs_enabled &= ~(1ULL << 63);
1133
1134 if (cpuc->enabled)
1135 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1136
1137 hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
1138 }
1139
1140 void intel_pmu_pebs_enable_all(void)
1141 {
1142 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1143
1144 if (cpuc->pebs_enabled)
1145 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1146 }
1147
1148 void intel_pmu_pebs_disable_all(void)
1149 {
1150 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1151
1152 if (cpuc->pebs_enabled)
1153 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1154 }
1155
1156 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
1157 {
1158 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1159 unsigned long from = cpuc->lbr_entries[0].from;
1160 unsigned long old_to, to = cpuc->lbr_entries[0].to;
1161 unsigned long ip = regs->ip;
1162 int is_64bit = 0;
1163 void *kaddr;
1164 int size;
1165
1166 /*
1167 * We don't need to fix up if the PEBS assist is fault-like
1168 */
1169 if (!x86_pmu.intel_cap.pebs_trap)
1170 return 1;
1171
1172 /*
1173 * No LBR entry, no basic block, no rewinding
1174 */
1175 if (!cpuc->lbr_stack.nr || !from || !to)
1176 return 0;
1177
1178 /*
1179 * Basic blocks should never cross user/kernel boundaries
1180 */
1181 if (kernel_ip(ip) != kernel_ip(to))
1182 return 0;
1183
1184 /*
1185 * unsigned math, either ip is before the start (impossible) or
1186 * the basic block is larger than 1 page (sanity)
1187 */
1188 if ((ip - to) > PEBS_FIXUP_SIZE)
1189 return 0;
1190
1191 /*
1192 * We sampled a branch insn, rewind using the LBR stack
1193 */
1194 if (ip == to) {
1195 set_linear_ip(regs, from);
1196 return 1;
1197 }
1198
1199 size = ip - to;
1200 if (!kernel_ip(ip)) {
1201 int bytes;
1202 u8 *buf = this_cpu_read(insn_buffer);
1203
1204 /* 'size' must fit our buffer, see above */
1205 bytes = copy_from_user_nmi(buf, (void __user *)to, size);
1206 if (bytes != 0)
1207 return 0;
1208
1209 kaddr = buf;
1210 } else {
1211 kaddr = (void *)to;
1212 }
1213
1214 do {
1215 struct insn insn;
1216
1217 old_to = to;
1218
1219 #ifdef CONFIG_X86_64
1220 is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
1221 #endif
1222 insn_init(&insn, kaddr, size, is_64bit);
1223 insn_get_length(&insn);
1224 /*
1225 * Make sure there was not a problem decoding the
1226 * instruction and getting the length. This is
1227 * doubly important because we have an infinite
1228 * loop if insn.length=0.
1229 */
1230 if (!insn.length)
1231 break;
1232
1233 to += insn.length;
1234 kaddr += insn.length;
1235 size -= insn.length;
1236 } while (to < ip);
1237
1238 if (to == ip) {
1239 set_linear_ip(regs, old_to);
1240 return 1;
1241 }
1242
1243 /*
1244 * Even though we decoded the basic block, the instruction stream
1245 * never matched the given IP; either the TO or the IP got corrupted.
1246 */
1247 return 0;
1248 }
1249
1250 static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
1251 {
1252 if (tsx_tuning) {
1253 union hsw_tsx_tuning tsx = { .value = tsx_tuning };
1254 return tsx.cycles_last_block;
1255 }
1256 return 0;
1257 }
1258
1259 static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
1260 {
1261 u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
1262
1263 /* For RTM XABORTs also log the abort code from AX */
1264 if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
1265 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
1266 return txn;
1267 }
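/*
 * Example (illustrative): for an RTM abort caused by XABORT(0x5a), ax has
 * bit 0 set and the immediate in bits 31:24, so 0x5a is shifted into the
 * abort-code field (PERF_TXN_ABORT_SHIFT) of the returned transaction word.
 */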
1268
1269 static inline u64 get_pebs_status(void *n)
1270 {
1271 if (x86_pmu.intel_cap.pebs_format < 4)
1272 return ((struct pebs_record_nhm *)n)->status;
1273 return ((struct pebs_basic *)n)->applicable_counters;
1274 }
1275
1276 #define PERF_X86_EVENT_PEBS_HSW_PREC \
1277 (PERF_X86_EVENT_PEBS_ST_HSW | \
1278 PERF_X86_EVENT_PEBS_LD_HSW | \
1279 PERF_X86_EVENT_PEBS_NA_HSW)
1280
1281 static u64 get_data_src(struct perf_event *event, u64 aux)
1282 {
1283 u64 val = PERF_MEM_NA;
1284 int fl = event->hw.flags;
1285 bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
1286
1287 if (fl & PERF_X86_EVENT_PEBS_LDLAT)
1288 val = load_latency_data(aux);
1289 else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
1290 val = precise_datala_hsw(event, aux);
1291 else if (fst)
1292 val = precise_store_data(aux);
1293 return val;
1294 }
1295
1296 static void setup_pebs_fixed_sample_data(struct perf_event *event,
1297 struct pt_regs *iregs, void *__pebs,
1298 struct perf_sample_data *data,
1299 struct pt_regs *regs)
1300 {
1301 /*
1302 * We cast to the biggest pebs_record but are careful not to
1303 * unconditionally access the 'extra' entries.
1304 */
1305 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1306 struct pebs_record_skl *pebs = __pebs;
1307 u64 sample_type;
1308 int fll;
1309
1310 if (pebs == NULL)
1311 return;
1312
1313 sample_type = event->attr.sample_type;
1314 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
1315
1316 perf_sample_data_init(data, 0, event->hw.last_period);
1317
1318 data->period = event->hw.last_period;
1319
1320 /*
1321 * Use latency for weight (only avail with PEBS-LL)
1322 */
1323 if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
1324 data->weight = pebs->lat;
1325
1326 /*
1327 * data.data_src encodes the data source
1328 */
1329 if (sample_type & PERF_SAMPLE_DATA_SRC)
1330 data->data_src.val = get_data_src(event, pebs->dse);
1331
1332 /*
1333 * We must however always use iregs for the unwinder to stay sane; the
1334 * record BP,SP,IP can point into thin air when the record is from a
1335 * previous PMI context or an (I)RET happened between the record and
1336 * PMI.
1337 */
1338 if (sample_type & PERF_SAMPLE_CALLCHAIN)
1339 data->callchain = perf_callchain(event, iregs);
1340
1341 /*
1342 * We use the interrupt regs as a base because the PEBS record does not
1343 * contain a full regs set, specifically it seems to lack segment
1344 * descriptors, which get used by things like user_mode().
1345 *
1346 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1347 */
1348 *regs = *iregs;
1349
1350 /*
1351 * Initialize regs->flags from PEBS,
1352 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
1353 * i.e., do not rely on it being zero:
1354 */
1355 regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
1356
1357 if (sample_type & PERF_SAMPLE_REGS_INTR) {
1358 regs->ax = pebs->ax;
1359 regs->bx = pebs->bx;
1360 regs->cx = pebs->cx;
1361 regs->dx = pebs->dx;
1362 regs->si = pebs->si;
1363 regs->di = pebs->di;
1364
1365 regs->bp = pebs->bp;
1366 regs->sp = pebs->sp;
1367
1368 #ifndef CONFIG_X86_32
1369 regs->r8 = pebs->r8;
1370 regs->r9 = pebs->r9;
1371 regs->r10 = pebs->r10;
1372 regs->r11 = pebs->r11;
1373 regs->r12 = pebs->r12;
1374 regs->r13 = pebs->r13;
1375 regs->r14 = pebs->r14;
1376 regs->r15 = pebs->r15;
1377 #endif
1378 }
1379
1380 if (event->attr.precise_ip > 1) {
1381 /*
1382 * Haswell and later processors have an 'eventing IP'
1383 * (real IP) which fixes the off-by-1 skid in hardware.
1384 * Use it when precise_ip >= 2:
1385 */
1386 if (x86_pmu.intel_cap.pebs_format >= 2) {
1387 set_linear_ip(regs, pebs->real_ip);
1388 regs->flags |= PERF_EFLAGS_EXACT;
1389 } else {
1390 /* Otherwise, use PEBS off-by-1 IP: */
1391 set_linear_ip(regs, pebs->ip);
1392
1393 /*
1394 * With precise_ip >= 2, try to fix up the off-by-1 IP
1395 * using the LBR. If successful, the fixup function
1396 * corrects regs->ip and calls set_linear_ip() on regs:
1397 */
1398 if (intel_pmu_pebs_fixup_ip(regs))
1399 regs->flags |= PERF_EFLAGS_EXACT;
1400 }
1401 } else {
1402 /*
1403 * When precise_ip == 1, return the PEBS off-by-1 IP,
1404 * no fixup attempted:
1405 */
1406 set_linear_ip(regs, pebs->ip);
1407 }
1408
1409
1410 if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
1411 x86_pmu.intel_cap.pebs_format >= 1)
1412 data->addr = pebs->dla;
1413
1414 if (x86_pmu.intel_cap.pebs_format >= 2) {
1415 /* Only set the TSX weight when there is no memory weight. */
1416 if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
1417 data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
1418
1419 if (sample_type & PERF_SAMPLE_TRANSACTION)
1420 data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
1421 pebs->ax);
1422 }
1423
1424 /*
1425 * v3 supplies an accurate time stamp, so we use it
1426 * as the sample's time stamp.
1427 *
1428 * We can only do this for the default trace clock.
1429 */
1430 if (x86_pmu.intel_cap.pebs_format >= 3 &&
1431 event->attr.use_clockid == 0)
1432 data->time = native_sched_clock_from_tsc(pebs->tsc);
1433
1434 if (has_branch_stack(event))
1435 data->br_stack = &cpuc->lbr_stack;
1436 }
1437
1438 static void adaptive_pebs_save_regs(struct pt_regs *regs,
1439 struct pebs_gprs *gprs)
1440 {
1441 regs->ax = gprs->ax;
1442 regs->bx = gprs->bx;
1443 regs->cx = gprs->cx;
1444 regs->dx = gprs->dx;
1445 regs->si = gprs->si;
1446 regs->di = gprs->di;
1447 regs->bp = gprs->bp;
1448 regs->sp = gprs->sp;
1449 #ifndef CONFIG_X86_32
1450 regs->r8 = gprs->r8;
1451 regs->r9 = gprs->r9;
1452 regs->r10 = gprs->r10;
1453 regs->r11 = gprs->r11;
1454 regs->r12 = gprs->r12;
1455 regs->r13 = gprs->r13;
1456 regs->r14 = gprs->r14;
1457 regs->r15 = gprs->r15;
1458 #endif
1459 }
1460
1461 /*
1462 * With adaptive PEBS the layout depends on what fields are configured.
1463 */
1464
1465 static void setup_pebs_adaptive_sample_data(struct perf_event *event,
1466 struct pt_regs *iregs, void *__pebs,
1467 struct perf_sample_data *data,
1468 struct pt_regs *regs)
1469 {
1470 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1471 struct pebs_basic *basic = __pebs;
1472 void *next_record = basic + 1;
1473 u64 sample_type;
1474 u64 format_size;
1475 struct pebs_meminfo *meminfo = NULL;
1476 struct pebs_gprs *gprs = NULL;
1477 struct x86_perf_regs *perf_regs;
1478
1479 if (basic == NULL)
1480 return;
1481
1482 perf_regs = container_of(regs, struct x86_perf_regs, regs);
1483 perf_regs->xmm_regs = NULL;
1484
1485 sample_type = event->attr.sample_type;
1486 format_size = basic->format_size;
1487 perf_sample_data_init(data, 0, event->hw.last_period);
1488 data->period = event->hw.last_period;
1489
1490 if (event->attr.use_clockid == 0)
1491 data->time = native_sched_clock_from_tsc(basic->tsc);
1492
1493 /*
1494 * We must however always use iregs for the unwinder to stay sane; the
1495 * record BP,SP,IP can point into thin air when the record is from a
1496 * previous PMI context or an (I)RET happened between the record and
1497 * PMI.
1498 */
1499 if (sample_type & PERF_SAMPLE_CALLCHAIN)
1500 data->callchain = perf_callchain(event, iregs);
1501
1502 *regs = *iregs;
1503 /* The ip in basic is EventingIP */
1504 set_linear_ip(regs, basic->ip);
1505 regs->flags = PERF_EFLAGS_EXACT;
1506
1507 /*
1508 * The MEMINFO record comes before the GP record,
1509 * but PERF_SAMPLE_TRANSACTION needs gprs->ax.
1510 * Save the pointer here and process it later.
1511 */
1512 if (format_size & PEBS_DATACFG_MEMINFO) {
1513 meminfo = next_record;
1514 next_record = meminfo + 1;
1515 }
1516
1517 if (format_size & PEBS_DATACFG_GP) {
1518 gprs = next_record;
1519 next_record = gprs + 1;
1520
1521 if (event->attr.precise_ip < 2) {
1522 set_linear_ip(regs, gprs->ip);
1523 regs->flags &= ~PERF_EFLAGS_EXACT;
1524 }
1525
1526 if (sample_type & PERF_SAMPLE_REGS_INTR)
1527 adaptive_pebs_save_regs(regs, gprs);
1528 }
1529
1530 if (format_size & PEBS_DATACFG_MEMINFO) {
1531 if (sample_type & PERF_SAMPLE_WEIGHT)
1532 data->weight = meminfo->latency ?:
1533 intel_get_tsx_weight(meminfo->tsx_tuning);
1534
1535 if (sample_type & PERF_SAMPLE_DATA_SRC)
1536 data->data_src.val = get_data_src(event, meminfo->aux);
1537
1538 if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
1539 data->addr = meminfo->address;
1540
1541 if (sample_type & PERF_SAMPLE_TRANSACTION)
1542 data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
1543 gprs ? gprs->ax : 0);
1544 }
1545
1546 if (format_size & PEBS_DATACFG_XMMS) {
1547 struct pebs_xmm *xmm = next_record;
1548
1549 next_record = xmm + 1;
1550 perf_regs->xmm_regs = xmm->xmm;
1551 }
1552
1553 if (format_size & PEBS_DATACFG_LBRS) {
1554 struct pebs_lbr *lbr = next_record;
1555 int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
1556 & 0xff) + 1;
1557 next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
1558
1559 if (has_branch_stack(event)) {
1560 intel_pmu_store_pebs_lbrs(lbr);
1561 data->br_stack = &cpuc->lbr_stack;
1562 }
1563 }
1564
1565 WARN_ONCE(next_record != __pebs + (format_size >> 48),
1566 "PEBS record size %llu, expected %llu, config %llx\n",
1567 format_size >> 48,
1568 (u64)(next_record - __pebs),
1569 basic->format_size);
1570 }
1571
1572 static inline void *
1573 get_next_pebs_record_by_bit(void *base, void *top, int bit)
1574 {
1575 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1576 void *at;
1577 u64 pebs_status;
1578
1579 /*
1580 * fmt0 does not have a status bitfield (does not use
1581 * pebs_record_nhm format)
1582 */
1583 if (x86_pmu.intel_cap.pebs_format < 1)
1584 return base;
1585
1586 if (base == NULL)
1587 return NULL;
1588
1589 for (at = base; at < top; at += cpuc->pebs_record_size) {
1590 unsigned long status = get_pebs_status(at);
1591
1592 if (test_bit(bit, (unsigned long *)&status)) {
1593 /* PEBS v3 has accurate status bits */
1594 if (x86_pmu.intel_cap.pebs_format >= 3)
1595 return at;
1596
1597 if (status == (1 << bit))
1598 return at;
1599
1600 /* clear non-PEBS bit and re-check */
1601 pebs_status = status & cpuc->pebs_enabled;
1602 pebs_status &= PEBS_COUNTER_MASK;
1603 if (pebs_status == (1 << bit))
1604 return at;
1605 }
1606 }
1607 return NULL;
1608 }
1609
1610 void intel_pmu_auto_reload_read(struct perf_event *event)
1611 {
1612 WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
1613
1614 perf_pmu_disable(event->pmu);
1615 intel_pmu_drain_pebs_buffer();
1616 perf_pmu_enable(event->pmu);
1617 }
1618
1619 /*
1620 * Special variant of intel_pmu_save_and_restart() for auto-reload.
1621 */
1622 static int
1623 intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1624 {
1625 struct hw_perf_event *hwc = &event->hw;
1626 int shift = 64 - x86_pmu.cntval_bits;
1627 u64 period = hwc->sample_period;
1628 u64 prev_raw_count, new_raw_count;
1629 s64 new, old;
1630
1631 WARN_ON(!period);
1632
1633 /*
1634 * drain_pebs() only happens when the PMU is disabled.
1635 */
1636 WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1637
1638 prev_raw_count = local64_read(&hwc->prev_count);
1639 rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1640 local64_set(&hwc->prev_count, new_raw_count);
1641
1642 /*
1643 * Since the counter increments a negative counter value and
1644 * overflows on the sign switch, giving the interval:
1645 *
1646 * [-period, 0]
1647 *
1648 * the difference between two consecutive reads is:
1649 *
1650 * A) value2 - value1;
1651 * when no overflows have happened in between,
1652 *
1653 * B) (0 - value1) + (value2 - (-period));
1654 * when one overflow happened in between,
1655 *
1656 * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
1657 * when @n overflows happened in between.
1658 *
1659 * Here A) is the obvious difference, B) is the extension to the
1660 * discrete interval, where the first term is to the top of the
1661 * interval and the second term is from the bottom of the next
1662 * interval and C) the extension to multiple intervals, where the
1663 * middle term is the whole intervals covered.
1664 *
1665 * An equivalent of C, by reduction, is:
1666 *
1667 * value2 - value1 + n * period
1668 */
1669 new = ((s64)(new_raw_count << shift) >> shift);
1670 old = ((s64)(prev_raw_count << shift) >> shift);
1671 local64_add(new - old + count * period, &event->count);
1672
1673 perf_event_update_userpage(event);
1674
1675 return 0;
1676 }
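/*
 * Worked example (illustrative numbers): with period = 4096 the counter
 * moves through [-4096, 0]. If the previous read was value1 = -3000, the
 * new read is value2 = -500 and count = 2 overflows were seen, then the
 * delta added to event->count is value2 - value1 + count * period =
 * -500 + 3000 + 2 * 4096 = 10692 events.
 */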
1677
1678 static void __intel_pmu_pebs_event(struct perf_event *event,
1679 struct pt_regs *iregs,
1680 void *base, void *top,
1681 int bit, int count,
1682 void (*setup_sample)(struct perf_event *,
1683 struct pt_regs *,
1684 void *,
1685 struct perf_sample_data *,
1686 struct pt_regs *))
1687 {
1688 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1689 struct hw_perf_event *hwc = &event->hw;
1690 struct perf_sample_data data;
1691 struct x86_perf_regs perf_regs;
1692 struct pt_regs *regs = &perf_regs.regs;
1693 void *at = get_next_pebs_record_by_bit(base, top, bit);
1694
1695 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1696 /*
1697 * Now, auto-reload is only enabled in fixed period mode.
1698 * The reload value is always hwc->sample_period.
1699 * This may need to change if auto-reload is ever enabled in
1700 * freq mode.
1701 */
1702 intel_pmu_save_and_restart_reload(event, count);
1703 } else if (!intel_pmu_save_and_restart(event))
1704 return;
1705
1706 while (count > 1) {
1707 setup_sample(event, iregs, at, &data, regs);
1708 perf_event_output(event, &data, regs);
1709 at += cpuc->pebs_record_size;
1710 at = get_next_pebs_record_by_bit(at, top, bit);
1711 count--;
1712 }
1713
1714 setup_sample(event, iregs, at, &data, regs);
1715
1716 /*
1717 * All but the last record are processed here.
1718 * The last one is left so that the overflow handler can be called for it.
1719 */
1720 if (perf_event_overflow(event, &data, regs)) {
1721 x86_pmu_stop(event, 0);
1722 return;
1723 }
1724
1725 }
1726
1727 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
1728 {
1729 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1730 struct debug_store *ds = cpuc->ds;
1731 struct perf_event *event = cpuc->events[0]; /* PMC0 only */
1732 struct pebs_record_core *at, *top;
1733 int n;
1734
1735 if (!x86_pmu.pebs_active)
1736 return;
1737
1738 at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
1739 top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
1740
1741 /*
1742 * Whatever else happens, drain the thing
1743 */
1744 ds->pebs_index = ds->pebs_buffer_base;
1745
1746 if (!test_bit(0, cpuc->active_mask))
1747 return;
1748
1749 WARN_ON_ONCE(!event);
1750
1751 if (!event->attr.precise_ip)
1752 return;
1753
1754 n = top - at;
1755 if (n <= 0) {
1756 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1757 intel_pmu_save_and_restart_reload(event, 0);
1758 return;
1759 }
1760
1761 __intel_pmu_pebs_event(event, iregs, at, top, 0, n,
1762 setup_pebs_fixed_sample_data);
1763 }
1764
1765 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
1766 {
1767 struct perf_event *event;
1768 int bit;
1769
1770 /*
1771 * drain_pebs() can be called twice in a short period
1772 * for an auto-reload event in pmu::read(), with no
1773 * overflows having happened in between.
1774 * intel_pmu_save_and_restart_reload() still needs to be
1775 * called to update event->count for this case.
1776 */
1777 for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
1778 event = cpuc->events[bit];
1779 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1780 intel_pmu_save_and_restart_reload(event, 0);
1781 }
1782 }
1783
1784 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1785 {
1786 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1787 struct debug_store *ds = cpuc->ds;
1788 struct perf_event *event;
1789 void *base, *at, *top;
1790 short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
1791 short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
1792 int bit, i, size;
1793 u64 mask;
1794
1795 if (!x86_pmu.pebs_active)
1796 return;
1797
1798 base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
1799 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
1800
1801 ds->pebs_index = ds->pebs_buffer_base;
1802
1803 mask = (1ULL << x86_pmu.max_pebs_events) - 1;
1804 size = x86_pmu.max_pebs_events;
1805 if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
1806 mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
1807 size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
1808 }
1809
1810 if (unlikely(base >= top)) {
1811 intel_pmu_pebs_event_update_no_drain(cpuc, size);
1812 return;
1813 }
1814
1815 for (at = base; at < top; at += x86_pmu.pebs_record_size) {
1816 struct pebs_record_nhm *p = at;
1817 u64 pebs_status;
1818
1819 pebs_status = p->status & cpuc->pebs_enabled;
1820 pebs_status &= mask;
1821
1822 /* PEBS v3 has more accurate status bits */
1823 if (x86_pmu.intel_cap.pebs_format >= 3) {
1824 for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
1825 counts[bit]++;
1826
1827 continue;
1828 }
1829
1830 /*
1831 * On some CPUs the PEBS status can be zero when PEBS is
1832 * racing with clearing of GLOBAL_STATUS.
1833 *
1834 * Normally we would drop that record, but in the
1835 * case when there is only a single active PEBS event
1836 * we can assume it's for that event.
1837 */
1838 if (!pebs_status && cpuc->pebs_enabled &&
1839 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
1840 pebs_status = cpuc->pebs_enabled;
1841
1842 bit = find_first_bit((unsigned long *)&pebs_status,
1843 x86_pmu.max_pebs_events);
1844 if (bit >= x86_pmu.max_pebs_events)
1845 continue;
1846
1847 /*
1848 * The PEBS hardware does not deal well with the situation
1849 * when events happen close to each other and multiple status bits
1850 * are set, but this should happen rarely.
1851 *
1852 * If these events include one PEBS and multiple non-PEBS
1853 * events, it doesn't impact the PEBS record; the record will
1854 * be handled normally. (slow path)
1855 *
1856 * If these events include two or more PEBS events, the
1857 * records for the events can be collapsed into a single
1858 * one, and it's not possible to reconstruct all events
1859 * that caused the PEBS record. This is called a collision.
1860 * If a collision happens, the record is dropped.
1861 */
1862 if (p->status != (1ULL << bit)) {
1863 for_each_set_bit(i, (unsigned long *)&pebs_status, size)
1864 error[i]++;
1865 continue;
1866 }
1867
1868 counts[bit]++;
1869 }
1870
1871 for_each_set_bit(bit, (unsigned long *)&mask, size) {
1872 if ((counts[bit] == 0) && (error[bit] == 0))
1873 continue;
1874
1875 event = cpuc->events[bit];
1876 if (WARN_ON_ONCE(!event))
1877 continue;
1878
1879 if (WARN_ON_ONCE(!event->attr.precise_ip))
1880 continue;
1881
1882 /* log dropped samples number */
1883 if (error[bit]) {
1884 perf_log_lost_samples(event, error[bit]);
1885
1886 if (perf_event_account_interrupt(event))
1887 x86_pmu_stop(event, 0);
1888 }
1889
1890 if (counts[bit]) {
1891 __intel_pmu_pebs_event(event, iregs, base,
1892 top, bit, counts[bit],
1893 setup_pebs_fixed_sample_data);
1894 }
1895 }
1896 }
1897
1898 static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
1899 {
1900 short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
1901 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1902 struct debug_store *ds = cpuc->ds;
1903 struct perf_event *event;
1904 void *base, *at, *top;
1905 int bit, size;
1906 u64 mask;
1907
1908 if (!x86_pmu.pebs_active)
1909 return;
1910
1911 base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
1912 top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
1913
1914 ds->pebs_index = ds->pebs_buffer_base;
1915
1916 mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
1917 (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
1918 size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
1919
1920 if (unlikely(base >= top)) {
1921 intel_pmu_pebs_event_update_no_drain(cpuc, size);
1922 return;
1923 }
1924
1925 for (at = base; at < top; at += cpuc->pebs_record_size) {
1926 u64 pebs_status;
1927
1928 pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
1929 pebs_status &= mask;
1930
1931 for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
1932 counts[bit]++;
1933 }
1934
1935 for_each_set_bit(bit, (unsigned long *)&mask, size) {
1936 if (counts[bit] == 0)
1937 continue;
1938
1939 event = cpuc->events[bit];
1940 if (WARN_ON_ONCE(!event))
1941 continue;
1942
1943 if (WARN_ON_ONCE(!event->attr.precise_ip))
1944 continue;
1945
1946 __intel_pmu_pebs_event(event, iregs, base,
1947 top, bit, counts[bit],
1948 setup_pebs_adaptive_sample_data);
1949 }
1950 }
1951
1952 /*
1953 * BTS, PEBS probe and setup
1954 */
1955
1956 void __init intel_ds_init(void)
1957 {
1958 /*
1959 * No support for 32-bit formats
1960 */
1961 if (!boot_cpu_has(X86_FEATURE_DTES64))
1962 return;
1963
1964 x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
1965 x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
1966 x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
1967 if (x86_pmu.version <= 4) {
1968 x86_pmu.pebs_no_isolation = 1;
1969 x86_pmu.pebs_no_xmm_regs = 1;
1970 }
1971 if (x86_pmu.pebs) {
1972 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
1973 char *pebs_qual = "";
1974 int format = x86_pmu.intel_cap.pebs_format;
1975
1976 if (format < 4)
1977 x86_pmu.intel_cap.pebs_baseline = 0;
1978
1979 switch (format) {
1980 case 0:
1981 pr_cont("PEBS fmt0%c, ", pebs_type);
1982 x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
1983 /*
1984 * Using >PAGE_SIZE buffers makes the WRMSR to
1985 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
1986 * mysteriously hang on Core2.
1987 *
1988 * As a workaround, we don't do this.
1989 */
1990 x86_pmu.pebs_buffer_size = PAGE_SIZE;
1991 x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
1992 break;
1993
1994 case 1:
1995 pr_cont("PEBS fmt1%c, ", pebs_type);
1996 x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
1997 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
1998 break;
1999
2000 case 2:
2001 pr_cont("PEBS fmt2%c, ", pebs_type);
2002 x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
2003 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2004 break;
2005
2006 case 3:
2007 pr_cont("PEBS fmt3%c, ", pebs_type);
2008 x86_pmu.pebs_record_size =
2009 sizeof(struct pebs_record_skl);
2010 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2011 x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
2012 break;
2013
2014 case 4:
2015 x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
2016 x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
2017 if (x86_pmu.intel_cap.pebs_baseline) {
2018 x86_pmu.large_pebs_flags |=
2019 PERF_SAMPLE_BRANCH_STACK |
2020 PERF_SAMPLE_TIME;
2021 x86_pmu.flags |= PMU_FL_PEBS_ALL;
2022 pebs_qual = "-baseline";
2023 x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
2024 } else {
2025 /* Only basic record supported */
2026 x86_pmu.pebs_no_xmm_regs = 1;
2027 x86_pmu.large_pebs_flags &=
2028 ~(PERF_SAMPLE_ADDR |
2029 PERF_SAMPLE_TIME |
2030 PERF_SAMPLE_DATA_SRC |
2031 PERF_SAMPLE_TRANSACTION |
2032 PERF_SAMPLE_REGS_USER |
2033 PERF_SAMPLE_REGS_INTR);
2034 }
2035 pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
2036 break;
2037
2038 default:
2039 pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
2040 x86_pmu.pebs = 0;
2041 }
2042 }
2043 }
2044
2045 void perf_restore_debug_store(void)
2046 {
2047 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2048
2049 if (!x86_pmu.bts && !x86_pmu.pebs)
2050 return;
2051
2052 wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
2053 }