#include <asm/cpu_device_id.h>
#include "uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}
static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}
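/*
 * Note on the locking pattern above: kmalloc(GFP_KERNEL) may sleep, so the
 * raw spinlock is dropped around the allocation and the lookup is restarted
 * from scratch afterwards, since another caller may have installed a map
 * for this segment in the meantime. Any allocation that turns out to be
 * unneeded is freed at the "end:" label.
 */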
ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}
/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}
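/*
 * Illustration (not part of the driver logic): two events programming the
 * same shared match/mask register can coexist only if they request
 * identical values. With er->ref == 1, a second event asking for the same
 * config1/config2 bumps the refcount and schedules normally; one asking for
 * different values gets uncore_constraint_empty back, which has an empty
 * counter mask and therefore can never be scheduled.
 */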
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box state
	 * either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
}
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
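/*
 * Worked example of the shift trick above (for illustration): with 48-bit
 * counters, shift = 16. Shifting both snapshots left by 16 moves the valid
 * bits to the top of the u64, so the subtraction wraps correctly modulo
 * 2^48 even when the raw counter rolled over; shifting back down yields the
 * unsigned delta. E.g. new = 0x10 after a wrap and prev = 0xffffffffffff
 * gives (0x100000 - 0xffffffffffff0000) >> 16 = 0x11, i.e. 17 counts.
 */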
/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}
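/*
 * Sizing note (informational): the default poll period comes from
 * UNCORE_PMU_HRTIMER_INTERVAL in uncore.h. A poll only has to happen well
 * before a counter can wrap: a 48-bit counter incrementing at 4 GHz takes
 * roughly 2^48 / 4e9 ~= 70000 seconds (~19.5 hours) to wrap, so a period
 * measured in seconds leaves an enormous safety margin.
 */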
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}
/*
 * Using the uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}
static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
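/*
 * Scheduling sketch (for illustration): the fastpath keeps every event on
 * the counter it already occupies, as long as its constraint still allows
 * that counter and no earlier event in the list claimed it. Only when some
 * event fails that check does perf_assign_events() run the generic
 * weight-ordered (wmin..wmax) assignment over all n events.
 */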
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}
static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}
static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}
void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);

	uncore_perf_event_update(box, event);
}
/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
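/*
 * The fake box exists only for this what-if scheduling pass: it shares the
 * real pmu/type so that the same constraints apply, but no hardware is
 * touched and the box is freed immediately, so a failed validation leaves
 * no state behind.
 */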
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
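/*
 * Userspace note: this "cpumask" attribute is what tooling reads to find
 * the single cpu per package on which uncore events must be opened, e.g.
 * (a hypothetical session; event names depend on the platform):
 *
 *	$ cat /sys/bus/event_source/devices/uncore_imc/cpumask
 *	0,18
 *	$ perf stat -a -e uncore_imc/event=0x1/ -- sleep 1
 *
 * perf consumes the cpumask internally, so "-a" system-wide collection
 * opens each uncore event only once per package.
 */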
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}
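/*
 * Naming example (informational): a type named "cbox" with eight boxes
 * registers as uncore_cbox_0 .. uncore_cbox_7, a single-box type named
 * "imc" registers as uncore_imc, and an empty type name yields plain
 * "uncore". These names appear under /sys/bus/event_source/devices/.
 */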
static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}
static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	if (pmu) {
		/* boxes[] is indexed by the logical package id, see uncore_cpu_prepare() */
		pkg = topology_logical_package_id(cpu);
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box)
				uncore_box_exit(box);
		}
	}
}
static void uncore_exit_boxes(void *dummy)
{
	struct intel_uncore_type **types;

	for (types = uncore_msr_uncores; *types; types++)
		__uncore_exit_boxes(*types, smp_processor_id());
}
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}
static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}
static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id	= setid ? i : -1;
		pmus[i].pmu_idx	= i;
		pmus[i].type	= type;
		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}
static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring units with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	if (!box) {
		/* no drvdata: must be one of the extra PCI devices */
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}
static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}
static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}
static void uncore_cpu_starting(int cpu, bool init)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, ncpus = 1;

	if (init) {
		/*
		 * On init we get the number of online cpus in the package
		 * and set refcount for all of them.
		 */
		ncpus = cpumask_weight(topology_core_cpumask(cpu));
	}

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
				uncore_box_init(box);
		}
	}
}
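/*
 * Refcount sketch (for illustration): with 8 online cpus in a package,
 * uncore_cpu_starting(cpu, true) on the first cpu adds 8 to box->refcnt in
 * one step and initializes the box; each later hotplugged cpu adds 1 via
 * the CPU_STARTING notifier, and uncore_cpu_dying() drops 1 per dying cpu,
 * calling uncore_box_exit() when the count reaches zero.
 */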
static int uncore_cpu_prepare(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}
static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}
static void uncore_event_exit_cpu(int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}
static void uncore_event_init_cpu(int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return notifier_from_errno(uncore_cpu_prepare(cpu));

	case CPU_STARTING:
		uncore_cpu_starting(cpu, false);
		/* fall through: a starting cpu may need to collect events */
	case CPU_DOWN_FAILED:
		uncore_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;

	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};
static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}
static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}
static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}
static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id(), true);
}
/* Lazy to avoid allocation of a few bytes for the normal case */
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);

static int __init uncore_cpumask_init(bool msr)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		unsigned int pkg = topology_logical_package_id(cpu);
		int ret;

		if (test_and_set_bit(pkg, packages))
			continue;
		/*
		 * The first online cpu of each package allocates and takes
		 * the refcounts for all other online cpus in that package.
		 * If msrs are not enabled no allocation is required.
		 */
		if (msr) {
			ret = uncore_cpu_prepare(cpu);
			if (ret)
				return ret;
		}
		uncore_event_init_cpu(cpu);
		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
	}
	__register_cpu_notifier(&uncore_cpu_nb);
	return 0;
}
#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
};
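/*
 * Expansion example (informational):
 * X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init) becomes
 *
 *	{ X86_VENDOR_INTEL, 6, 26, X86_FEATURE_ANY,
 *	  (unsigned long)&nhm_uncore_init }
 *
 * i.e. a struct x86_cpu_id entry keying family 6, model 26 to its init
 * functions via the driver_data pointer.
 */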
static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.pci_init = skl_uncore_pci_init,
};
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init),	/* Nehalem */
	X86_UNCORE_MODEL_MATCH(30, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(37, nhm_uncore_init),	/* Westmere */
	X86_UNCORE_MODEL_MATCH(44, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(42, snb_uncore_init),	/* Sandy Bridge */
	X86_UNCORE_MODEL_MATCH(58, ivb_uncore_init),	/* Ivy Bridge */
	X86_UNCORE_MODEL_MATCH(60, hsw_uncore_init),	/* Haswell */
	X86_UNCORE_MODEL_MATCH(69, hsw_uncore_init),	/* Haswell Celeron */
	X86_UNCORE_MODEL_MATCH(70, hsw_uncore_init),	/* Haswell */
	X86_UNCORE_MODEL_MATCH(61, bdw_uncore_init),	/* Broadwell */
	X86_UNCORE_MODEL_MATCH(71, bdw_uncore_init),	/* Broadwell */
	X86_UNCORE_MODEL_MATCH(45, snbep_uncore_init),	/* Sandy Bridge-EP */
	X86_UNCORE_MODEL_MATCH(46, nhmex_uncore_init),	/* Nehalem-EX */
	X86_UNCORE_MODEL_MATCH(47, nhmex_uncore_init),	/* Westmere-EX aka. Xeon E7 */
	X86_UNCORE_MODEL_MATCH(62, ivbep_uncore_init),	/* Ivy Bridge-EP */
	X86_UNCORE_MODEL_MATCH(63, hswep_uncore_init),	/* Haswell-EP */
	X86_UNCORE_MODEL_MATCH(79, bdx_uncore_init),	/* BDX-EP */
	X86_UNCORE_MODEL_MATCH(86, bdx_uncore_init),	/* BDX-DE */
	X86_UNCORE_MODEL_MATCH(87, knl_uncore_init),	/* Knights Landing */
	X86_UNCORE_MODEL_MATCH(94, skl_uncore_init),	/* SkyLake */
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	cpu_notifier_register_begin();
	ret = uncore_cpumask_init(!cret);
	if (ret)
		goto err;
	cpu_notifier_register_done();
	return 0;

err:
	/* Undo box->init_box() */
	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	cpu_notifier_register_done();
	return ret;
}
module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&uncore_cpu_nb);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	cpu_notifier_register_done();
}
module_exit(intel_uncore_exit);