/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
/*
 * IBS states:
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our bit and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event has the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */
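/*
 * The state bits discussed above; a minimal reconstruction based on the
 * names referenced throughout this file, with IBS_MAX_STATES sizing the
 * per-cpu state bitmap below.
 */
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,
	IBS_STOPPED	= 3,

	IBS_MAX_STATES,
};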
struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};
struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);
};
struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * the next interrupt might fire while we are still in the irq
	 * handler. This biases the results. Thus we shorten the
	 * next-to-last period and set the last period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
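/*
 * Worked example of the splitting above: with min = 0x10 and
 * left = max + 0x8, the next-to-last hw period becomes 0x10 (the 0x8
 * remainder rounded up to min) and the remaining ~max count is
 * programmed on the following interrupt, so the last period is the max
 * period.
 */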
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
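/*
 * Example of the shift trick above: for a hypothetical 48-bit counter
 * (width = 48, shift = 16), shifting both counts left by 16 and the
 * difference back right by 16 sign-extends the delta, so a wrap from
 * 0xffffffffffff to 0 still yields a small positive delta. IBS callers
 * pass width = 64, where the shifts are no-ops.
 */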
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;
static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;	/* use the normal PMU approach */
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
static const struct perf_event_attr ibs_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
	.exclude_idle	= 1,
	.exclude_host	= 1,
	.exclude_guest	= 1,
};
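/*
 * Validate an event against the IBS constraints and translate its
 * attributes into the IBS MSR configuration: the sample period maps to
 * the MaxCnt field, whose lower 4 bits are not implemented in hardware.
 */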
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
		return -EINVAL;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the ibs max
			 * cnt, but we allow them in case we later adjust
			 * the sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}
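/*
 * IbsOpCtl holds both the programmed maximum count (MaxCnt) and, on
 * processors with the RDWROPCNT capability, the current count (CurCnt)
 * in the upper half of the register. When the valid bit is set the
 * counter has rolled over, so the full period (MaxCnt << 4) is added to
 * whatever CurCnt has accumulated since.
 */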
static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it. Then clear STOPPING such that we don't consume NMIs
	 * by accident.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared first, an NMI hitting after the clear but before
		 * clearing the EN bit might think it a spurious NMI and not
		 * handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear the valid bit so we do not count rollovers on update;
	 * rollovers are only counted in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}
static void perf_ibs_read(struct perf_event *event) { }
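/*
 * Format attributes exported under
 * /sys/bus/event_source/devices/ibs_{fetch,op}/format/, mapping named
 * event modifiers onto bits of the config word.
 */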
PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");
static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
	NULL,
};
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,
};
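/*
 * Called from the NMI handler for one IBS unit: pick up the sample
 * registers, update the event count, rearm the hardware for the next
 * period, and feed the sample (optionally including the raw register
 * dump) into the generic overflow machinery.
 */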
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 3;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * Read IbsBrTarget and IbsOpData4 separately
		 * depending on their availability. They can't be
		 * added to offset_max as their MSRs are staggered.
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_stop(event, 0);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}
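/*
 * One NMI may serve both IBS units; each handler consumes its own
 * samples and spurious interrupts, and the summed handled count keeps
 * the unknown-NMI detection quiet.
 */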
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
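/*
 * Allocate the per-cpu state for one IBS unit, wire up its sysfs
 * attribute groups, and register it as a PMU under the given name.
 */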
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init void perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

#endif
/* IBS - apic initialization, for perf and oprofile */
static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
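/*
 * An extended interrupt LVT entry is reserved by programming it while
 * masked (the trailing 1); setup_APIC_eilvt() fails if the entry is
 * already in use with different settings. put_eilvt() releases the
 * entry by resetting it, again masked.
 */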
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
}
static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set it up there.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;

failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}
943 offset
= get_ibs_lvt_offset();
945 setup_APIC_eilvt(offset
, 0, APIC_EILVT_MSG_FIX
, 1);
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
	setup_APIC_ibs();
	return 0;
}
#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
	clear_APIC_ibs();
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
	clear_APIC_ibs();
	return 0;
}
static __init int amd_ibs_init(void)
{
	u32 caps;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		return -EINVAL;

	perf_ibs_pm_init();

	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	perf_event_ibs_init();

	return 0;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);