arch/x86/events/intel/uncore.c
6bcb2db5 1#include "uncore.h"
087bfbb0
YZ
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
514b2346
YZ
4struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
5struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
14371cce 6
514b2346
YZ
7static bool pcidrv_registered;
8struct pci_driver *uncore_pci_driver;
9/* pci bus to socket mapping */
712df65c
TI
10DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
11struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
514b2346 12struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
899396cf 13
14371cce 14static DEFINE_RAW_SPINLOCK(uncore_box_lock);
087bfbb0
YZ
15/* mask of cpus that collect uncore events */
16static cpumask_t uncore_cpu_mask;
17
18/* constraint for the fixed counter */
514b2346 19static struct event_constraint uncore_constraint_fixed =
087bfbb0 20 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
514b2346 21struct event_constraint uncore_constraint_empty =
6a67943a 22 EVENT_CONSTRAINT(0, 0, 0);
087bfbb0 23
1384c704 24static int uncore_pcibus_to_physid(struct pci_bus *bus)
712df65c
TI
25{
26 struct pci2phy_map *map;
27 int phys_id = -1;
28
29 raw_spin_lock(&pci2phy_map_lock);
30 list_for_each_entry(map, &pci2phy_map_head, list) {
31 if (map->segment == pci_domain_nr(bus)) {
32 phys_id = map->pbus_to_physid[bus->number];
33 break;
34 }
35 }
36 raw_spin_unlock(&pci2phy_map_lock);
37
38 return phys_id;
39}
40
4f089678
TG
41static void uncore_free_pcibus_map(void)
42{
43 struct pci2phy_map *map, *tmp;
44
45 list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
46 list_del(&map->list);
47 kfree(map);
48 }
49}
50
712df65c
TI
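/*
 * Look up the pci2phy_map for a PCI segment, allocating a new entry if
 * none exists yet.  Called with pci2phy_map_lock held; the lock is
 * dropped around the allocation, so the list is re-scanned before the
 * new entry is inserted.
 */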
51struct pci2phy_map *__find_pci2phy_map(int segment)
52{
53 struct pci2phy_map *map, *alloc = NULL;
54 int i;
55
56 lockdep_assert_held(&pci2phy_map_lock);
57
58lookup:
59 list_for_each_entry(map, &pci2phy_map_head, list) {
60 if (map->segment == segment)
61 goto end;
62 }
63
64 if (!alloc) {
65 raw_spin_unlock(&pci2phy_map_lock);
66 alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
67 raw_spin_lock(&pci2phy_map_lock);
68
69 if (!alloc)
70 return NULL;
71
72 goto lookup;
73 }
74
75 map = alloc;
76 alloc = NULL;
77 map->segment = segment;
78 for (i = 0; i < 256; i++)
79 map->pbus_to_physid[i] = -1;
80 list_add_tail(&map->list, &pci2phy_map_head);
81
82end:
83 kfree(alloc);
84 return map;
85}
86
514b2346
YZ
87ssize_t uncore_event_show(struct kobject *kobj,
88 struct kobj_attribute *attr, char *buf)
89{
90 struct uncore_event_desc *event =
91 container_of(attr, struct uncore_event_desc, attr);
92 return sprintf(buf, "%s", event->config);
93}
94
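/*
 * Return the box that services @cpu for @pmu.  The per-cpu pointer is
 * filled lazily: on the first lookup the pmu's box list is searched for
 * a box on the same physical package and the match is cached.
 */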
514b2346 95struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
001e413f
SE
96{
97 struct intel_uncore_box *box;
98
99 box = *per_cpu_ptr(pmu->box, cpu);
100 if (box)
101 return box;
102
103 raw_spin_lock(&uncore_box_lock);
4f971248
AK
104 /* Recheck in lock to handle races. */
105 if (*per_cpu_ptr(pmu->box, cpu))
106 goto out;
001e413f
SE
107 list_for_each_entry(box, &pmu->box_list, list) {
108 if (box->phys_id == topology_physical_package_id(cpu)) {
109 atomic_inc(&box->refcnt);
110 *per_cpu_ptr(pmu->box, cpu) = box;
111 break;
112 }
113 }
4f971248 114out:
001e413f
SE
115 raw_spin_unlock(&uncore_box_lock);
116
117 return *per_cpu_ptr(pmu->box, cpu);
118}
119
514b2346 120u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
254298c7
YZ
121{
122 u64 count;
123
124 rdmsrl(event->hw.event_base, count);
125
126 return count;
127}
128
129/*
130 * generic get constraint function for shared match/mask registers.
131 */
514b2346 132struct event_constraint *
254298c7
YZ
133uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
134{
135 struct intel_uncore_extra_reg *er;
136 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
137 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
138 unsigned long flags;
139 bool ok = false;
140
141 /*
142 * reg->alloc can be set due to existing state, so for fake box we
143 * need to ignore this, otherwise we might fail to allocate proper
144 * fake state for this extra reg constraint.
145 */
146 if (reg1->idx == EXTRA_REG_NONE ||
147 (!uncore_box_is_fake(box) && reg1->alloc))
148 return NULL;
149
150 er = &box->shared_regs[reg1->idx];
151 raw_spin_lock_irqsave(&er->lock, flags);
152 if (!atomic_read(&er->ref) ||
153 (er->config1 == reg1->config && er->config2 == reg2->config)) {
154 atomic_inc(&er->ref);
155 er->config1 = reg1->config;
156 er->config2 = reg2->config;
157 ok = true;
158 }
159 raw_spin_unlock_irqrestore(&er->lock, flags);
160
161 if (ok) {
162 if (!uncore_box_is_fake(box))
163 reg1->alloc = 1;
164 return NULL;
165 }
166
514b2346 167 return &uncore_constraint_empty;
254298c7
YZ
168}
169
514b2346 170void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
254298c7
YZ
171{
172 struct intel_uncore_extra_reg *er;
173 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
174
175 /*
176 * Only put constraint if extra reg was actually allocated. Also
 177	 * takes care of events which do not use an extra shared reg.
178 *
179 * Also, if this is a fake box we shouldn't touch any event state
180 * (reg->alloc) and we don't care about leaving inconsistent box
181 * state either since it will be thrown out.
182 */
183 if (uncore_box_is_fake(box) || !reg1->alloc)
184 return;
185
186 er = &box->shared_regs[reg1->idx];
187 atomic_dec(&er->ref);
188 reg1->alloc = 0;
189}
190
514b2346 191u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
46bdd905
YZ
192{
193 struct intel_uncore_extra_reg *er;
194 unsigned long flags;
195 u64 config;
196
197 er = &box->shared_regs[idx];
198
199 raw_spin_lock_irqsave(&er->lock, flags);
200 config = er->config;
201 raw_spin_unlock_irqrestore(&er->lock, flags);
202
203 return config;
204}
205
1229735b
TG
206static void uncore_assign_hw_event(struct intel_uncore_box *box,
207 struct perf_event *event, int idx)
087bfbb0
YZ
208{
209 struct hw_perf_event *hwc = &event->hw;
210
211 hwc->idx = idx;
212 hwc->last_tag = ++box->tags[idx];
213
214 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
14371cce
YZ
215 hwc->event_base = uncore_fixed_ctr(box);
216 hwc->config_base = uncore_fixed_ctl(box);
087bfbb0
YZ
217 return;
218 }
219
14371cce
YZ
220 hwc->config_base = uncore_event_ctl(box, hwc->idx);
221 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
087bfbb0
YZ
222}
223
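/*
 * Read the current counter value and add the delta since the last read
 * to event->count, truncating the delta to the width of the underlying
 * fixed or generic counter.
 */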
514b2346 224void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0
YZ
225{
226 u64 prev_count, new_count, delta;
227 int shift;
228
229 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
230 shift = 64 - uncore_fixed_ctr_bits(box);
231 else
232 shift = 64 - uncore_perf_ctr_bits(box);
233
234 /* the hrtimer might modify the previous event value */
235again:
236 prev_count = local64_read(&event->hw.prev_count);
237 new_count = uncore_read_counter(box, event);
238 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
239 goto again;
240
241 delta = (new_count << shift) - (prev_count << shift);
242 delta >>= shift;
243
244 local64_add(delta, &event->count);
245}
246
247/*
 248	 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 249	 * for SandyBridge, so we use an hrtimer to periodically poll the counters
250 * to avoid overflow.
251 */
252static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
253{
254 struct intel_uncore_box *box;
ced2efb0 255 struct perf_event *event;
087bfbb0
YZ
256 unsigned long flags;
257 int bit;
258
259 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
260 if (!box->n_active || box->cpu != smp_processor_id())
261 return HRTIMER_NORESTART;
262 /*
 263	 * disable local interrupts to prevent uncore_pmu_event_start/stop
 264	 * from interrupting the update process
265 */
266 local_irq_save(flags);
267
ced2efb0
SE
268 /*
269 * handle boxes with an active event list as opposed to active
270 * counters
271 */
272 list_for_each_entry(event, &box->active_list, active_entry) {
273 uncore_perf_event_update(box, event);
274 }
275
087bfbb0
YZ
276 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
277 uncore_perf_event_update(box, box->events[bit]);
278
279 local_irq_restore(flags);
280
79859cce 281 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
087bfbb0
YZ
282 return HRTIMER_RESTART;
283}
284
514b2346 285void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
087bfbb0 286{
576b0704
TG
287 hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
288 HRTIMER_MODE_REL_PINNED);
087bfbb0
YZ
289}
290
514b2346 291void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
087bfbb0
YZ
292{
293 hrtimer_cancel(&box->hrtimer);
294}
295
296static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
297{
298 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
299 box->hrtimer.function = uncore_pmu_hrtimer;
300}
301
1229735b
TG
302static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
303 int node)
087bfbb0 304{
1229735b 305	int i, size, numshared = type->num_shared_regs;
087bfbb0
YZ
306 struct intel_uncore_box *box;
307
1229735b 308 size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
6a67943a 309
73c4427c 310 box = kzalloc_node(size, GFP_KERNEL, node);
087bfbb0
YZ
311 if (!box)
312 return NULL;
313
1229735b 314 for (i = 0; i < numshared; i++)
6a67943a
YZ
315 raw_spin_lock_init(&box->shared_regs[i].lock);
316
087bfbb0
YZ
317 uncore_pmu_init_hrtimer(box);
318 atomic_set(&box->refcnt, 1);
319 box->cpu = -1;
320 box->phys_id = -1;
321
79859cce
SE
322 /* set default hrtimer timeout */
323 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
087bfbb0 324
ced2efb0 325 INIT_LIST_HEAD(&box->active_list);
14371cce 326
087bfbb0 327 return box;
087bfbb0
YZ
328}
329
af91568e
JO
330/*
331 * Using uncore_pmu_event_init pmu event_init callback
332 * as a detection point for uncore events.
333 */
334static int uncore_pmu_event_init(struct perf_event *event);
335
336static bool is_uncore_event(struct perf_event *event)
337{
338 return event->pmu->event_init == uncore_pmu_event_init;
339}
340
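/*
 * Collect the leader and, if @dogrp is set, its active uncore siblings
 * into box->event_list.  Returns the new number of collected events, or
 * -EINVAL if the box is already full.
 */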
254298c7 341static int
1229735b
TG
342uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
343 bool dogrp)
087bfbb0
YZ
344{
345 struct perf_event *event;
346 int n, max_count;
347
348 max_count = box->pmu->type->num_counters;
349 if (box->pmu->type->fixed_ctl)
350 max_count++;
351
352 if (box->n_events >= max_count)
353 return -EINVAL;
354
355 n = box->n_events;
af91568e
JO
356
357 if (is_uncore_event(leader)) {
358 box->event_list[n] = leader;
359 n++;
360 }
361
087bfbb0
YZ
362 if (!dogrp)
363 return n;
364
365 list_for_each_entry(event, &leader->sibling_list, group_entry) {
af91568e
JO
366 if (!is_uncore_event(event) ||
367 event->state <= PERF_EVENT_STATE_OFF)
087bfbb0
YZ
368 continue;
369
370 if (n >= max_count)
371 return -EINVAL;
372
373 box->event_list[n] = event;
374 n++;
375 }
376 return n;
377}
378
379static struct event_constraint *
254298c7 380uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 381{
6a67943a 382 struct intel_uncore_type *type = box->pmu->type;
087bfbb0
YZ
383 struct event_constraint *c;
384
6a67943a
YZ
385 if (type->ops->get_constraint) {
386 c = type->ops->get_constraint(box, event);
387 if (c)
388 return c;
389 }
390
dbc33f70 391 if (event->attr.config == UNCORE_FIXED_EVENT)
514b2346 392 return &uncore_constraint_fixed;
087bfbb0
YZ
393
394 if (type->constraints) {
395 for_each_event_constraint(c, type->constraints) {
396 if ((event->hw.config & c->cmask) == c->code)
397 return c;
398 }
399 }
400
401 return &type->unconstrainted;
402}
403
1229735b
TG
404static void uncore_put_event_constraint(struct intel_uncore_box *box,
405 struct perf_event *event)
6a67943a
YZ
406{
407 if (box->pmu->type->ops->put_constraint)
408 box->pmu->type->ops->put_constraint(box, event);
409}
410
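/*
 * Compute a counter assignment for the @n collected events.  The fast
 * path keeps events on their previously used counters where the
 * constraints still allow it; otherwise perf_assign_events() is used.
 * Constraints are released again if no assignment was requested or if
 * scheduling failed.
 */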
254298c7 411static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
087bfbb0
YZ
412{
413 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
43b45780 414 struct event_constraint *c;
6a67943a 415 int i, wmin, wmax, ret = 0;
087bfbb0
YZ
416 struct hw_perf_event *hwc;
417
418 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
419
420 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
6a67943a 421 c = uncore_get_event_constraint(box, box->event_list[i]);
b371b594 422 box->event_constraint[i] = c;
087bfbb0
YZ
423 wmin = min(wmin, c->weight);
424 wmax = max(wmax, c->weight);
425 }
426
427 /* fastpath, try to reuse previous register */
428 for (i = 0; i < n; i++) {
429 hwc = &box->event_list[i]->hw;
b371b594 430 c = box->event_constraint[i];
087bfbb0
YZ
431
432 /* never assigned */
433 if (hwc->idx == -1)
434 break;
435
436 /* constraint still honored */
437 if (!test_bit(hwc->idx, c->idxmsk))
438 break;
439
440 /* not already used */
441 if (test_bit(hwc->idx, used_mask))
442 break;
443
444 __set_bit(hwc->idx, used_mask);
6a67943a
YZ
445 if (assign)
446 assign[i] = hwc->idx;
087bfbb0 447 }
087bfbb0 448 /* slow path */
6a67943a 449 if (i != n)
b371b594 450 ret = perf_assign_events(box->event_constraint, n,
cc1790cf 451 wmin, wmax, n, assign);
6a67943a
YZ
452
453 if (!assign || ret) {
454 for (i = 0; i < n; i++)
455 uncore_put_event_constraint(box, box->event_list[i]);
456 }
087bfbb0
YZ
457 return ret ? -EINVAL : 0;
458}
459
460static void uncore_pmu_event_start(struct perf_event *event, int flags)
461{
462 struct intel_uncore_box *box = uncore_event_to_box(event);
463 int idx = event->hw.idx;
464
465 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
466 return;
467
468 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
469 return;
470
471 event->hw.state = 0;
472 box->events[idx] = event;
473 box->n_active++;
474 __set_bit(idx, box->active_mask);
475
476 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
477 uncore_enable_event(box, event);
478
479 if (box->n_active == 1) {
480 uncore_enable_box(box);
481 uncore_pmu_start_hrtimer(box);
482 }
483}
484
485static void uncore_pmu_event_stop(struct perf_event *event, int flags)
486{
487 struct intel_uncore_box *box = uncore_event_to_box(event);
488 struct hw_perf_event *hwc = &event->hw;
489
490 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
491 uncore_disable_event(box, event);
492 box->n_active--;
493 box->events[hwc->idx] = NULL;
494 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
495 hwc->state |= PERF_HES_STOPPED;
496
497 if (box->n_active == 0) {
498 uncore_disable_box(box);
499 uncore_pmu_cancel_hrtimer(box);
500 }
501 }
502
503 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
504 /*
 505		 * Drain the remaining delta count out of an event
506 * that we are disabling:
507 */
508 uncore_perf_event_update(box, event);
509 hwc->state |= PERF_HES_UPTODATE;
510 }
511}
512
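/*
 * pmu::add callback: collect the new event, recompute the counter
 * assignment for the whole box and restart any event that had to move
 * to a different counter.
 */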
513static int uncore_pmu_event_add(struct perf_event *event, int flags)
514{
515 struct intel_uncore_box *box = uncore_event_to_box(event);
516 struct hw_perf_event *hwc = &event->hw;
517 int assign[UNCORE_PMC_IDX_MAX];
518 int i, n, ret;
519
520 if (!box)
521 return -ENODEV;
522
523 ret = n = uncore_collect_events(box, event, false);
524 if (ret < 0)
525 return ret;
526
527 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
528 if (!(flags & PERF_EF_START))
529 hwc->state |= PERF_HES_ARCH;
530
531 ret = uncore_assign_events(box, assign, n);
532 if (ret)
533 return ret;
534
535 /* save events moving to new counters */
536 for (i = 0; i < box->n_events; i++) {
537 event = box->event_list[i];
538 hwc = &event->hw;
539
540 if (hwc->idx == assign[i] &&
541 hwc->last_tag == box->tags[assign[i]])
542 continue;
543 /*
544 * Ensure we don't accidentally enable a stopped
545 * counter simply because we rescheduled.
546 */
547 if (hwc->state & PERF_HES_STOPPED)
548 hwc->state |= PERF_HES_ARCH;
549
550 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
551 }
552
553 /* reprogram moved events into new counters */
554 for (i = 0; i < n; i++) {
555 event = box->event_list[i];
556 hwc = &event->hw;
557
558 if (hwc->idx != assign[i] ||
559 hwc->last_tag != box->tags[assign[i]])
560 uncore_assign_hw_event(box, event, assign[i]);
561 else if (i < box->n_events)
562 continue;
563
564 if (hwc->state & PERF_HES_ARCH)
565 continue;
566
567 uncore_pmu_event_start(event, 0);
568 }
569 box->n_events = n;
570
571 return 0;
572}
573
574static void uncore_pmu_event_del(struct perf_event *event, int flags)
575{
576 struct intel_uncore_box *box = uncore_event_to_box(event);
577 int i;
578
579 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
580
581 for (i = 0; i < box->n_events; i++) {
582 if (event == box->event_list[i]) {
6a67943a
YZ
583 uncore_put_event_constraint(box, event);
584
1229735b 585 for (++i; i < box->n_events; i++)
087bfbb0
YZ
586 box->event_list[i - 1] = box->event_list[i];
587
588 --box->n_events;
589 break;
590 }
591 }
592
593 event->hw.idx = -1;
594 event->hw.last_tag = ~0ULL;
595}
596
514b2346 597void uncore_pmu_event_read(struct perf_event *event)
087bfbb0
YZ
598{
599 struct intel_uncore_box *box = uncore_event_to_box(event);
600 uncore_perf_event_update(box, event);
601}
602
603/*
604 * validation ensures the group can be loaded onto the
605 * PMU if it was the only group available.
606 */
607static int uncore_validate_group(struct intel_uncore_pmu *pmu,
608 struct perf_event *event)
609{
610 struct perf_event *leader = event->group_leader;
611 struct intel_uncore_box *fake_box;
087bfbb0
YZ
612 int ret = -EINVAL, n;
613
73c4427c 614 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
087bfbb0
YZ
615 if (!fake_box)
616 return -ENOMEM;
617
618 fake_box->pmu = pmu;
619 /*
620 * the event is not yet connected with its
 621	 * siblings, therefore we must first collect
622 * existing siblings, then add the new event
623 * before we can simulate the scheduling
624 */
625 n = uncore_collect_events(fake_box, leader, true);
626 if (n < 0)
627 goto out;
628
629 fake_box->n_events = n;
630 n = uncore_collect_events(fake_box, event, false);
631 if (n < 0)
632 goto out;
633
634 fake_box->n_events = n;
635
6a67943a 636 ret = uncore_assign_events(fake_box, NULL, n);
087bfbb0
YZ
637out:
638 kfree(fake_box);
639 return ret;
640}
641
46bdd905 642static int uncore_pmu_event_init(struct perf_event *event)
087bfbb0
YZ
643{
644 struct intel_uncore_pmu *pmu;
645 struct intel_uncore_box *box;
646 struct hw_perf_event *hwc = &event->hw;
647 int ret;
648
649 if (event->attr.type != event->pmu->type)
650 return -ENOENT;
651
652 pmu = uncore_event_to_pmu(event);
653 /* no device found for this pmu */
654 if (pmu->func_id < 0)
655 return -ENOENT;
656
657 /*
 658	 * The uncore PMU always measures at all privilege levels, so it
 659	 * doesn't make sense to specify any exclude bits.
660 */
661 if (event->attr.exclude_user || event->attr.exclude_kernel ||
662 event->attr.exclude_hv || event->attr.exclude_idle)
663 return -EINVAL;
664
665 /* Sampling not supported yet */
666 if (hwc->sample_period)
667 return -EINVAL;
668
669 /*
670 * Place all uncore events for a particular physical package
671 * onto a single cpu
672 */
673 if (event->cpu < 0)
674 return -EINVAL;
675 box = uncore_pmu_to_box(pmu, event->cpu);
676 if (!box || box->cpu < 0)
677 return -EINVAL;
678 event->cpu = box->cpu;
1f2569fa 679 event->pmu_private = box;
087bfbb0 680
6a67943a
YZ
681 event->hw.idx = -1;
682 event->hw.last_tag = ~0ULL;
683 event->hw.extra_reg.idx = EXTRA_REG_NONE;
ebb6cc03 684 event->hw.branch_reg.idx = EXTRA_REG_NONE;
6a67943a 685
087bfbb0
YZ
686 if (event->attr.config == UNCORE_FIXED_EVENT) {
687 /* no fixed counter */
688 if (!pmu->type->fixed_ctl)
689 return -EINVAL;
690 /*
691 * if there is only one fixed counter, only the first pmu
692 * can access the fixed counter
693 */
694 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
695 return -EINVAL;
dbc33f70
SE
696
697 /* fixed counters have event field hardcoded to zero */
698 hwc->config = 0ULL;
087bfbb0
YZ
699 } else {
700 hwc->config = event->attr.config & pmu->type->event_mask;
6a67943a
YZ
701 if (pmu->type->ops->hw_config) {
702 ret = pmu->type->ops->hw_config(box, event);
703 if (ret)
704 return ret;
705 }
087bfbb0
YZ
706 }
707
087bfbb0
YZ
708 if (event->group_leader != event)
709 ret = uncore_validate_group(pmu, event);
710 else
711 ret = 0;
712
713 return ret;
714}
715
314d9f63
YZ
716static ssize_t uncore_get_attr_cpumask(struct device *dev,
717 struct device_attribute *attr, char *buf)
718{
5aaba363 719 return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
314d9f63
YZ
720}
721
722static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
723
724static struct attribute *uncore_pmu_attrs[] = {
725 &dev_attr_cpumask.attr,
726 NULL,
727};
728
729static struct attribute_group uncore_pmu_attr_group = {
730 .attrs = uncore_pmu_attrs,
731};
732
a08b6769 733static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
087bfbb0
YZ
734{
735 int ret;
736
d64b25b6
SE
737 if (!pmu->type->pmu) {
738 pmu->pmu = (struct pmu) {
739 .attr_groups = pmu->type->attr_groups,
740 .task_ctx_nr = perf_invalid_context,
741 .event_init = uncore_pmu_event_init,
742 .add = uncore_pmu_event_add,
743 .del = uncore_pmu_event_del,
744 .start = uncore_pmu_event_start,
745 .stop = uncore_pmu_event_stop,
746 .read = uncore_pmu_event_read,
747 };
748 } else {
749 pmu->pmu = *pmu->type->pmu;
750 pmu->pmu.attr_groups = pmu->type->attr_groups;
751 }
087bfbb0
YZ
752
753 if (pmu->type->num_boxes == 1) {
754 if (strlen(pmu->type->name) > 0)
755 sprintf(pmu->name, "uncore_%s", pmu->type->name);
756 else
757 sprintf(pmu->name, "uncore");
758 } else {
759 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
760 pmu->pmu_idx);
761 }
762
763 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
4f089678
TG
764 if (!ret)
765 pmu->registered = true;
087bfbb0
YZ
766 return ret;
767}
768
4f089678
TG
769static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
770{
771 if (!pmu->registered)
772 return;
773 perf_pmu_unregister(&pmu->pmu);
774 pmu->registered = false;
775}
776
087bfbb0
YZ
777static void __init uncore_type_exit(struct intel_uncore_type *type)
778{
779 int i;
780
ffeda003 781 if (type->pmus) {
4f089678
TG
782 for (i = 0; i < type->num_boxes; i++) {
783 uncore_pmu_unregister(&type->pmus[i]);
ffeda003 784 free_percpu(type->pmus[i].box);
4f089678 785 }
ffeda003
TG
786 kfree(type->pmus);
787 type->pmus = NULL;
788 }
314d9f63
YZ
789 kfree(type->events_group);
790 type->events_group = NULL;
087bfbb0
YZ
791}
792
cffa59ba 793static void __init uncore_types_exit(struct intel_uncore_type **types)
14371cce 794{
1229735b
TG
795 for (; *types; types++)
796 uncore_type_exit(*types);
14371cce
YZ
797}
798
087bfbb0
YZ
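/*
 * Set up the per-type state: one intel_uncore_pmu per box, the
 * unconstrained event constraint and the "events" attribute group built
 * from the type's event descriptions.
 */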
799static int __init uncore_type_init(struct intel_uncore_type *type)
800{
801 struct intel_uncore_pmu *pmus;
1b0dac2a 802 struct attribute_group *attr_group;
087bfbb0
YZ
803 struct attribute **attrs;
804 int i, j;
805
806 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
807 if (!pmus)
808 return -ENOMEM;
809
b7b4839d
DJ
810 type->pmus = pmus;
811
087bfbb0
YZ
812 type->unconstrainted = (struct event_constraint)
813 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
9fac2cf3 814 0, type->num_counters, 0, 0);
087bfbb0
YZ
815
816 for (i = 0; i < type->num_boxes; i++) {
817 pmus[i].func_id = -1;
818 pmus[i].pmu_idx = i;
819 pmus[i].type = type;
14371cce 820 INIT_LIST_HEAD(&pmus[i].box_list);
087bfbb0
YZ
821 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
822 if (!pmus[i].box)
ffeda003 823 return -ENOMEM;
087bfbb0
YZ
824 }
825
826 if (type->event_descs) {
827 i = 0;
828 while (type->event_descs[i].attr.attr.name)
829 i++;
830
1b0dac2a
JSM
831 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
832 sizeof(*attr_group), GFP_KERNEL);
833 if (!attr_group)
ffeda003 834 return -ENOMEM;
087bfbb0 835
1b0dac2a
JSM
836 attrs = (struct attribute **)(attr_group + 1);
837 attr_group->name = "events";
838 attr_group->attrs = attrs;
087bfbb0
YZ
839
840 for (j = 0; j < i; j++)
841 attrs[j] = &type->event_descs[j].attr.attr;
842
1b0dac2a 843 type->events_group = attr_group;
087bfbb0
YZ
844 }
845
314d9f63 846 type->pmu_group = &uncore_pmu_attr_group;
087bfbb0 847 return 0;
087bfbb0
YZ
848}
849
850static int __init uncore_types_init(struct intel_uncore_type **types)
851{
852 int i, ret;
853
854 for (i = 0; types[i]; i++) {
855 ret = uncore_type_init(types[i]);
856 if (ret)
ffeda003 857 return ret;
087bfbb0
YZ
858 }
859 return 0;
087bfbb0
YZ
860}
861
14371cce
YZ
862/*
863 * add a pci uncore device
864 */
899396cf 865static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
14371cce
YZ
866{
867 struct intel_uncore_pmu *pmu;
868 struct intel_uncore_box *box;
899396cf 869 struct intel_uncore_type *type;
513d793e 870 bool first_box = false;
4f089678 871 int phys_id, ret;
14371cce 872
712df65c 873 phys_id = uncore_pcibus_to_physid(pdev->bus);
83f8ebd2 874 if (phys_id < 0 || phys_id >= UNCORE_SOCKET_MAX)
14371cce
YZ
875 return -ENODEV;
876
899396cf 877 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
514b2346
YZ
878 int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
879 uncore_extra_pci_dev[phys_id][idx] = pdev;
899396cf
YZ
880 pci_set_drvdata(pdev, NULL);
881 return 0;
882 }
883
514b2346 884 type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
73c4427c 885 box = uncore_alloc_box(type, NUMA_NO_NODE);
14371cce
YZ
886 if (!box)
887 return -ENOMEM;
888
889 /*
 890	 * for performance monitoring units with multiple boxes,
891 * each box has a different function id.
892 */
899396cf 893 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
77af0037
HC
894 /* Knights Landing uses a common PCI device ID for multiple instances of
895 * an uncore PMU device type. There is only one entry per device type in
 896	 * the knl_uncore_pci_ids table despite multiple devices being present for
 897	 * some device types. Hence the PCI device idx would be 0 for all devices.
898 * So increment pmu pointer to point to an unused array element.
899 */
1229735b 900 if (boot_cpu_data.x86_model == 87) {
77af0037
HC
901 while (pmu->func_id >= 0)
902 pmu++;
1229735b
TG
903 }
904
899396cf
YZ
905 if (pmu->func_id < 0)
906 pmu->func_id = pdev->devfn;
907 else
908 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
14371cce
YZ
909
910 box->phys_id = phys_id;
911 box->pci_dev = pdev;
912 box->pmu = pmu;
15c12479 913 uncore_box_init(box);
14371cce
YZ
914 pci_set_drvdata(pdev, box);
915
916 raw_spin_lock(&uncore_box_lock);
513d793e
YZ
917 if (list_empty(&pmu->box_list))
918 first_box = true;
14371cce
YZ
919 list_add_tail(&box->list, &pmu->box_list);
920 raw_spin_unlock(&uncore_box_lock);
921
4f089678
TG
922 if (!first_box)
923 return 0;
924
925 ret = uncore_pmu_register(pmu);
926 if (ret) {
927 pci_set_drvdata(pdev, NULL);
928 raw_spin_lock(&uncore_box_lock);
929 list_del(&box->list);
930 raw_spin_unlock(&uncore_box_lock);
a46195f1 931 uncore_box_exit(box);
4f089678
TG
932 kfree(box);
933 }
934 return ret;
14371cce
YZ
935}
936
357398e9 937static void uncore_pci_remove(struct pci_dev *pdev)
14371cce
YZ
938{
939 struct intel_uncore_box *box = pci_get_drvdata(pdev);
899396cf 940 struct intel_uncore_pmu *pmu;
712df65c 941 int i, cpu, phys_id;
513d793e 942 bool last_box = false;
899396cf 943
712df65c 944 phys_id = uncore_pcibus_to_physid(pdev->bus);
899396cf
YZ
945 box = pci_get_drvdata(pdev);
946 if (!box) {
947 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
514b2346
YZ
948 if (uncore_extra_pci_dev[phys_id][i] == pdev) {
949 uncore_extra_pci_dev[phys_id][i] = NULL;
899396cf
YZ
950 break;
951 }
952 }
953 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
954 return;
955 }
14371cce 956
899396cf 957 pmu = box->pmu;
14371cce
YZ
958 if (WARN_ON_ONCE(phys_id != box->phys_id))
959 return;
960
e850f9c3
YZ
961 pci_set_drvdata(pdev, NULL);
962
14371cce
YZ
963 raw_spin_lock(&uncore_box_lock);
964 list_del(&box->list);
513d793e
YZ
965 if (list_empty(&pmu->box_list))
966 last_box = true;
14371cce
YZ
967 raw_spin_unlock(&uncore_box_lock);
968
969 for_each_possible_cpu(cpu) {
970 if (*per_cpu_ptr(pmu->box, cpu) == box) {
971 *per_cpu_ptr(pmu->box, cpu) = NULL;
972 atomic_dec(&box->refcnt);
973 }
974 }
975
976 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
a46195f1 977 uncore_box_exit(box);
14371cce 978 kfree(box);
513d793e
YZ
979
980 if (last_box)
4f089678 981 uncore_pmu_unregister(pmu);
14371cce
YZ
982}
983
14371cce
YZ
984static int __init uncore_pci_init(void)
985{
986 int ret;
987
988 switch (boot_cpu_data.x86_model) {
7c94ee2e 989 case 45: /* Sandy Bridge-EP */
8268fdfc 990 ret = snbep_uncore_pci_init();
7c94ee2e 991 break;
ddcd0973
PZ
992 case 62: /* Ivy Bridge-EP */
993 ret = ivbep_uncore_pci_init();
e850f9c3 994 break;
e735b9db
YZ
995 case 63: /* Haswell-EP */
996 ret = hswep_uncore_pci_init();
997 break;
d6980ef3 998 case 79: /* BDX-EP */
070e9887
KL
999 case 86: /* BDX-DE */
1000 ret = bdx_uncore_pci_init();
1001 break;
b9e1ab6d 1002 case 42: /* Sandy Bridge */
92807ffd 1003 ret = snb_uncore_pci_init();
b9e1ab6d
SE
1004 break;
1005 case 58: /* Ivy Bridge */
92807ffd 1006 ret = ivb_uncore_pci_init();
b9e1ab6d
SE
1007 break;
1008 case 60: /* Haswell */
1009 case 69: /* Haswell Celeron */
92807ffd 1010 ret = hsw_uncore_pci_init();
b9e1ab6d 1011 break;
a41f3c8c
SE
1012 case 61: /* Broadwell */
1013 ret = bdw_uncore_pci_init();
1014 break;
77af0037
HC
1015 case 87: /* Knights Landing */
1016 ret = knl_uncore_pci_init();
1017 break;
0e1eb0a1
SE
1018 case 94: /* SkyLake */
1019 ret = skl_uncore_pci_init();
1020 break;
14371cce
YZ
1021 default:
1022 return 0;
1023 }
1024
92807ffd
YZ
1025 if (ret)
1026 return ret;
1027
514b2346 1028 ret = uncore_types_init(uncore_pci_uncores);
14371cce 1029 if (ret)
ffeda003 1030 goto err;
14371cce
YZ
1031
1032 uncore_pci_driver->probe = uncore_pci_probe;
1033 uncore_pci_driver->remove = uncore_pci_remove;
1034
1035 ret = pci_register_driver(uncore_pci_driver);
ffeda003
TG
1036 if (ret)
1037 goto err;
1038
1039 pcidrv_registered = true;
1040 return 0;
14371cce 1041
ffeda003
TG
1042err:
1043 uncore_types_exit(uncore_pci_uncores);
1044 uncore_pci_uncores = empty_uncore;
4f089678 1045 uncore_free_pcibus_map();
14371cce
YZ
1046 return ret;
1047}
1048
1049static void __init uncore_pci_exit(void)
1050{
1051 if (pcidrv_registered) {
1052 pcidrv_registered = false;
1053 pci_unregister_driver(uncore_pci_driver);
514b2346 1054 uncore_types_exit(uncore_pci_uncores);
4f089678 1055 uncore_free_pcibus_map();
14371cce
YZ
1056 }
1057}
1058
22cc4ccf
YZ
1059/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
1060static LIST_HEAD(boxes_to_free);
1061
148f9bb8 1062static void uncore_kfree_boxes(void)
22cc4ccf
YZ
1063{
1064 struct intel_uncore_box *box;
1065
1066 while (!list_empty(&boxes_to_free)) {
1067 box = list_entry(boxes_to_free.next,
1068 struct intel_uncore_box, list);
1069 list_del(&box->list);
1070 kfree(box);
1071 }
1072}
1073
148f9bb8 1074static void uncore_cpu_dying(int cpu)
087bfbb0
YZ
1075{
1076 struct intel_uncore_type *type;
1077 struct intel_uncore_pmu *pmu;
1078 struct intel_uncore_box *box;
1079 int i, j;
1080
514b2346
YZ
1081 for (i = 0; uncore_msr_uncores[i]; i++) {
1082 type = uncore_msr_uncores[i];
087bfbb0
YZ
1083 for (j = 0; j < type->num_boxes; j++) {
1084 pmu = &type->pmus[j];
1085 box = *per_cpu_ptr(pmu->box, cpu);
1086 *per_cpu_ptr(pmu->box, cpu) = NULL;
a46195f1 1087 if (box && atomic_dec_and_test(&box->refcnt)) {
22cc4ccf 1088 list_add(&box->list, &boxes_to_free);
a46195f1
TG
1089 uncore_box_exit(box);
1090 }
087bfbb0
YZ
1091 }
1092 }
1093}
1094
148f9bb8 1095static int uncore_cpu_starting(int cpu)
087bfbb0
YZ
1096{
1097 struct intel_uncore_type *type;
1098 struct intel_uncore_pmu *pmu;
1099 struct intel_uncore_box *box, *exist;
1100 int i, j, k, phys_id;
1101
1102 phys_id = topology_physical_package_id(cpu);
1103
514b2346
YZ
1104 for (i = 0; uncore_msr_uncores[i]; i++) {
1105 type = uncore_msr_uncores[i];
087bfbb0
YZ
1106 for (j = 0; j < type->num_boxes; j++) {
1107 pmu = &type->pmus[j];
1108 box = *per_cpu_ptr(pmu->box, cpu);
1109 /* called by uncore_cpu_init? */
15c12479
IM
1110 if (box && box->phys_id >= 0) {
1111 uncore_box_init(box);
087bfbb0 1112 continue;
15c12479 1113 }
087bfbb0
YZ
1114
1115 for_each_online_cpu(k) {
1116 exist = *per_cpu_ptr(pmu->box, k);
1117 if (exist && exist->phys_id == phys_id) {
1118 atomic_inc(&exist->refcnt);
1119 *per_cpu_ptr(pmu->box, cpu) = exist;
22cc4ccf
YZ
1120 if (box) {
1121 list_add(&box->list,
1122 &boxes_to_free);
1123 box = NULL;
1124 }
087bfbb0
YZ
1125 break;
1126 }
1127 }
1128
15c12479 1129 if (box) {
087bfbb0 1130 box->phys_id = phys_id;
15c12479
IM
1131 uncore_box_init(box);
1132 }
087bfbb0
YZ
1133 }
1134 }
1135 return 0;
1136}
1137
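/*
 * Allocate a box for each MSR uncore pmu on behalf of @cpu and store it
 * in the pmu's per-cpu pointer.  Used both during boot-time setup and
 * from the CPU_UP_PREPARE notifier.
 */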
148f9bb8 1138static int uncore_cpu_prepare(int cpu, int phys_id)
087bfbb0
YZ
1139{
1140 struct intel_uncore_type *type;
1141 struct intel_uncore_pmu *pmu;
1142 struct intel_uncore_box *box;
1143 int i, j;
1144
514b2346
YZ
1145 for (i = 0; uncore_msr_uncores[i]; i++) {
1146 type = uncore_msr_uncores[i];
087bfbb0
YZ
1147 for (j = 0; j < type->num_boxes; j++) {
1148 pmu = &type->pmus[j];
1149 if (pmu->func_id < 0)
1150 pmu->func_id = j;
1151
73c4427c 1152 box = uncore_alloc_box(type, cpu_to_node(cpu));
087bfbb0
YZ
1153 if (!box)
1154 return -ENOMEM;
1155
1156 box->pmu = pmu;
1157 box->phys_id = phys_id;
1158 *per_cpu_ptr(pmu->box, cpu) = box;
1159 }
1160 }
1161 return 0;
1162}
1163
1229735b
TG
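/*
 * Move the uncore context of @type from @old_cpu to @new_cpu: update
 * the owning cpu of each box and migrate the perf context so events
 * keep counting on the new cpu.
 */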
1164static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1165 int new_cpu)
087bfbb0 1166{
1229735b 1167 struct intel_uncore_pmu *pmu = type->pmus;
087bfbb0 1168 struct intel_uncore_box *box;
1229735b 1169 int i;
087bfbb0 1170
1229735b
TG
1171 for (i = 0; i < type->num_boxes; i++, pmu++) {
1172 if (old_cpu < 0)
1173 box = uncore_pmu_to_box(pmu, new_cpu);
1174 else
1175 box = uncore_pmu_to_box(pmu, old_cpu);
1176 if (!box)
1177 continue;
087bfbb0 1178
1229735b
TG
1179 if (old_cpu < 0) {
1180 WARN_ON_ONCE(box->cpu != -1);
1181 box->cpu = new_cpu;
1182 continue;
087bfbb0 1183 }
1229735b
TG
1184
1185 WARN_ON_ONCE(box->cpu != old_cpu);
1186 box->cpu = -1;
1187 if (new_cpu < 0)
1188 continue;
1189
1190 uncore_pmu_cancel_hrtimer(box);
1191 perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1192 box->cpu = new_cpu;
087bfbb0
YZ
1193 }
1194}
1195
1229735b
TG
1196static void uncore_change_context(struct intel_uncore_type **uncores,
1197 int old_cpu, int new_cpu)
1198{
1199 for (; *uncores; uncores++)
1200 uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1201}
1202
148f9bb8 1203static void uncore_event_exit_cpu(int cpu)
087bfbb0
YZ
1204{
1205 int i, phys_id, target;
1206
1207 /* if exiting cpu is used for collecting uncore events */
1208 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1209 return;
1210
1211 /* find a new cpu to collect uncore events */
1212 phys_id = topology_physical_package_id(cpu);
1213 target = -1;
1214 for_each_online_cpu(i) {
1215 if (i == cpu)
1216 continue;
1217 if (phys_id == topology_physical_package_id(i)) {
1218 target = i;
1219 break;
1220 }
1221 }
1222
1223 /* migrate uncore events to the new cpu */
1224 if (target >= 0)
1225 cpumask_set_cpu(target, &uncore_cpu_mask);
1226
514b2346
YZ
1227 uncore_change_context(uncore_msr_uncores, cpu, target);
1228 uncore_change_context(uncore_pci_uncores, cpu, target);
087bfbb0
YZ
1229}
1230
148f9bb8 1231static void uncore_event_init_cpu(int cpu)
087bfbb0
YZ
1232{
1233 int i, phys_id;
1234
1235 phys_id = topology_physical_package_id(cpu);
1236 for_each_cpu(i, &uncore_cpu_mask) {
1237 if (phys_id == topology_physical_package_id(i))
1238 return;
1239 }
1240
1241 cpumask_set_cpu(cpu, &uncore_cpu_mask);
1242
514b2346
YZ
1243 uncore_change_context(uncore_msr_uncores, -1, cpu);
1244 uncore_change_context(uncore_pci_uncores, -1, cpu);
087bfbb0
YZ
1245}
1246
148f9bb8
PG
1247static int uncore_cpu_notifier(struct notifier_block *self,
1248 unsigned long action, void *hcpu)
087bfbb0
YZ
1249{
1250 unsigned int cpu = (long)hcpu;
1251
1252 /* allocate/free data structure for uncore box */
1253 switch (action & ~CPU_TASKS_FROZEN) {
1254 case CPU_UP_PREPARE:
4f089678 1255 return notifier_from_errno(uncore_cpu_prepare(cpu, -1));
087bfbb0
YZ
1256 case CPU_STARTING:
1257 uncore_cpu_starting(cpu);
1258 break;
1259 case CPU_UP_CANCELED:
1260 case CPU_DYING:
1261 uncore_cpu_dying(cpu);
1262 break;
22cc4ccf
YZ
1263 case CPU_ONLINE:
1264 case CPU_DEAD:
1265 uncore_kfree_boxes();
1266 break;
087bfbb0
YZ
1267 default:
1268 break;
1269 }
1270
1271 /* select the cpu that collects uncore events */
1272 switch (action & ~CPU_TASKS_FROZEN) {
1273 case CPU_DOWN_FAILED:
1274 case CPU_STARTING:
1275 uncore_event_init_cpu(cpu);
1276 break;
1277 case CPU_DOWN_PREPARE:
1278 uncore_event_exit_cpu(cpu);
1279 break;
1280 default:
1281 break;
1282 }
1283
1284 return NOTIFY_OK;
1285}
1286
148f9bb8 1287static struct notifier_block uncore_cpu_nb = {
254298c7 1288 .notifier_call = uncore_cpu_notifier,
087bfbb0
YZ
1289 /*
1290 * to migrate uncore events, our notifier should be executed
1291 * before perf core's notifier.
1292 */
254298c7 1293 .priority = CPU_PRI_PERF + 1,
087bfbb0
YZ
1294};
1295
4f089678 1296static int __init type_pmu_register(struct intel_uncore_type *type)
087bfbb0 1297{
4f089678
TG
1298 int i, ret;
1299
1300 for (i = 0; i < type->num_boxes; i++) {
1301 ret = uncore_pmu_register(&type->pmus[i]);
1302 if (ret)
1303 return ret;
1304 }
1305 return 0;
1306}
1307
1308static int __init uncore_msr_pmus_register(void)
1309{
1310 struct intel_uncore_type **types = uncore_msr_uncores;
1311 int ret;
1312
1229735b
TG
1313 for (; *types; types++) {
1314 ret = type_pmu_register(*types);
4f089678
TG
1315 if (ret)
1316 return ret;
1317 }
1318 return 0;
087bfbb0
YZ
1319}
1320
1321static int __init uncore_cpu_init(void)
1322{
c1e46580 1323 int ret;
087bfbb0
YZ
1324
1325 switch (boot_cpu_data.x86_model) {
fcde10e9
YZ
1326 case 26: /* Nehalem */
1327 case 30:
1328 case 37: /* Westmere */
1329 case 44:
92807ffd 1330 nhm_uncore_cpu_init();
fcde10e9
YZ
1331 break;
1332 case 42: /* Sandy Bridge */
9a6bc143 1333 case 58: /* Ivy Bridge */
3a999587
AK
1334 case 60: /* Haswell */
1335 case 69: /* Haswell */
1336 case 70: /* Haswell */
1337 case 61: /* Broadwell */
1338 case 71: /* Broadwell */
92807ffd 1339 snb_uncore_cpu_init();
fcde10e9 1340 break;
80e217e9 1341 case 45: /* Sandy Bridge-EP */
8268fdfc 1342 snbep_uncore_cpu_init();
7c94ee2e 1343 break;
cb37af77 1344 case 46: /* Nehalem-EX */
cb37af77 1345 case 47: /* Westmere-EX aka. Xeon E7 */
c1e46580 1346 nhmex_uncore_cpu_init();
254298c7 1347 break;
ddcd0973
PZ
1348 case 62: /* Ivy Bridge-EP */
1349 ivbep_uncore_cpu_init();
e850f9c3 1350 break;
e735b9db
YZ
1351 case 63: /* Haswell-EP */
1352 hswep_uncore_cpu_init();
1353 break;
d6980ef3 1354 case 79: /* BDX-EP */
070e9887
KL
1355 case 86: /* BDX-DE */
1356 bdx_uncore_cpu_init();
1357 break;
77af0037
HC
1358 case 87: /* Knights Landing */
1359 knl_uncore_cpu_init();
1360 break;
087bfbb0
YZ
1361 default:
1362 return 0;
1363 }
1364
514b2346 1365 ret = uncore_types_init(uncore_msr_uncores);
4f089678
TG
1366 if (ret)
1367 goto err;
1368
1369 ret = uncore_msr_pmus_register();
087bfbb0 1370 if (ret)
ffeda003 1371 goto err;
087bfbb0 1372 return 0;
ffeda003
TG
1373err:
1374 uncore_types_exit(uncore_msr_uncores);
1375 uncore_msr_uncores = empty_uncore;
1376 return ret;
087bfbb0
YZ
1377}
1378
4f089678 1379static void __init uncore_cpu_setup(void *dummy)
087bfbb0 1380{
4f089678 1381 uncore_cpu_starting(smp_processor_id());
087bfbb0
YZ
1382}
1383
4f089678 1384static int __init uncore_cpumask_init(void)
411cf180 1385{
4f089678 1386 int cpu, ret = 0;
411cf180 1387
467a9e16 1388 cpu_notifier_register_begin();
411cf180
SE
1389
1390 for_each_online_cpu(cpu) {
1391 int i, phys_id = topology_physical_package_id(cpu);
1392
1393 for_each_cpu(i, &uncore_cpu_mask) {
1394 if (phys_id == topology_physical_package_id(i)) {
1395 phys_id = -1;
1396 break;
1397 }
1398 }
1399 if (phys_id < 0)
1400 continue;
1401
4f089678
TG
1402 ret = uncore_cpu_prepare(cpu, phys_id);
1403 if (ret)
1404 goto out;
411cf180
SE
1405 uncore_event_init_cpu(cpu);
1406 }
1407 on_each_cpu(uncore_cpu_setup, NULL, 1);
1408
467a9e16 1409 __register_cpu_notifier(&uncore_cpu_nb);
411cf180 1410
4f089678 1411out:
467a9e16 1412 cpu_notifier_register_done();
4f089678 1413 return ret;
411cf180
SE
1414}
1415
087bfbb0
YZ
1416static int __init intel_uncore_init(void)
1417{
1418 int ret;
1419
1420 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
1421 return -ENODEV;
1422
a05123bd
YZ
1423 if (cpu_has_hypervisor)
1424 return -ENODEV;
1425
14371cce 1426 ret = uncore_pci_init();
087bfbb0 1427 if (ret)
4f089678 1428 return ret;
14371cce 1429 ret = uncore_cpu_init();
4f089678
TG
1430 if (ret)
1431 goto errpci;
1432 ret = uncore_cpumask_init();
1433 if (ret)
1434 goto errcpu;
087bfbb0 1435
087bfbb0 1436 return 0;
4f089678
TG
1437
1438errcpu:
1439 uncore_types_exit(uncore_msr_uncores);
1440errpci:
1441 uncore_pci_exit();
087bfbb0
YZ
1442 return ret;
1443}
1444device_initcall(intel_uncore_init);