/* arch/x86/kernel/cpu/perf_event_intel_ds.c */
#ifdef CONFIG_CPU_SUP_INTEL

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE	24

#define BTS_BUFFER_SIZE	(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64 bts_buffer_base;
	u64 bts_index;
	u64 bts_absolute_maximum;
	u64 bts_interrupt_threshold;
	u64 pebs_buffer_base;
	u64 pebs_index;
	u64 pebs_absolute_maximum;
	u64 pebs_interrupt_threshold;
	u64 pebs_event_reset[MAX_PEBS_EVENTS];
};

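/*
 * Note: the debug store area above is handed to the hardware by writing
 * its linear address into MSR_IA32_DS_AREA; clearing that MSR (as
 * fini_debug_store_on_cpu() does) detaches it again.
 */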
static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

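/*
 * The PEBS interrupt threshold is set to a single record below, so the
 * hardware raises a PMI after every PEBS assist and the drain routines
 * only ever have to deal with (at most) one fresh record per interrupt.
 */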
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

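/*
 * BTS records arrive far more often than PEBS records, so the BTS buffer
 * is larger and its interrupt threshold is placed 1/16th of the buffer
 * before the absolute maximum, leaving headroom between the PMI and an
 * actual buffer overflow.
 */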
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

static void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

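/*
 * BTS and PEBS allocations are allowed to fail independently: if either
 * one fails on any CPU, that feature is released and left inactive while
 * the other can still be used.  Only when both fail is the debug store
 * area itself torn down again.
 */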
static void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

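/*
 * DEBUGCTLMSR_TR enables branch trace messages, DEBUGCTLMSR_BTS directs
 * them into the DS buffer and DEBUGCTLMSR_BTINT raises an interrupt once
 * the interrupt threshold is crossed; the BTS_OFF_OS/BTS_OFF_USR bits
 * suppress tracing for the privilege levels the event did not ask for.
 */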
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

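/*
 * Drain the BTS buffer: every hardware record is a (from, to, flags)
 * triple, which is turned into a perf sample with the branch source as
 * the sample IP and the branch target as the data address.
 */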
static int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
		return 1;

	for (; at < top; at++) {
		data.ip = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */

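/*
 * The PEBS constraint tables below map event codes to the counters that
 * may carry them as precise events: only PMC0 (mask 0x1) on Core, PMC0-3
 * (mask 0xf) on Nehalem.
 */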
static struct event_constraint intel_core_pebs_events[] = {
	PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
	PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
	PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_events[] = {
	PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
	PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
	PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
	PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
	PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
	PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
	EVENT_CONSTRAINT_END
};

static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

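/*
 * When a PEBS event is enabled the regular counter interrupt is masked
 * (ARCH_PERFMON_EVENTSEL_INT is cleared); the PMI comes from the DS area
 * threshold instead.  For precise_ip > 1 the LBR is enabled as well, so
 * the trap-like PEBS IP can later be rewound by intel_pmu_pebs_fixup_ip().
 */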
static void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}

static void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}

static void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

static void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

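/*
 * Trap-like PEBS reports the address of the instruction *after* the one
 * that caused the event.  To undo that skid, start at the target of the
 * last LBR entry (the beginning of the sampled basic block), decode
 * instructions forward until the reported IP is reached, and rewind to
 * the previous instruction.  Returns 1 if the resulting IP is exact.
 */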
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

		kernel_insn_init(&insn, kaddr);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

static int intel_pmu_save_and_restart(struct perf_event *event);

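/*
 * Common sample-emission path shared by the core and Nehalem drain
 * routines below; __pebs points at one raw PEBS record.
 */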
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, 1, &data, &regs))
		x86_pmu_stop(event, 0);
}

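/*
 * Format 0 (core) PEBS records carry no status word and only PMC0 can be
 * a PEBS counter, so everything in the buffer belongs to cpuc->events[0].
 * Given the one-record threshold, more than one entry is unexpected and
 * only the most recent record is consumed.
 */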
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

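/*
 * Format 1 (Nehalem) records carry a status bitmask, so a single PMI can
 * demultiplex records for up to MAX_PEBS_EVENTS counters; each record is
 * attributed to the first active, precise counter whose status bit has
 * not been claimed by an earlier record.
 */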
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

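/*
 * Probe BTS and PEBS from the CPUID feature flags; the PEBS format in
 * x86_pmu.intel_cap (filled in elsewhere, from the PERF_CAPABILITIES MSR)
 * selects the record size, drain routine and constraint table.
 */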
static void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			x86_pmu.pebs_constraints = intel_core_pebs_events;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
			break;
		}
	}
}

#else /* CONFIG_CPU_SUP_INTEL */

static void reserve_ds_buffers(void)
{
}

static void release_ds_buffers(void)
{
}

#endif /* CONFIG_CPU_SUP_INTEL */