#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

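/*
 * Look up the box that services @cpu for this pmu. The per-cpu pointer is
 * checked locklessly first and rechecked under uncore_box_lock before
 * falling back to a search of the pmu's box list by physical package id.
 */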
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	struct intel_uncore_box *box;

	box = *per_cpu_ptr(pmu->box, cpu);
	if (box)
		return box;

	raw_spin_lock(&uncore_box_lock);
	/* Recheck in lock to handle races. */
	if (*per_cpu_ptr(pmu->box, cpu))
		goto out;
	list_for_each_entry(box, &pmu->box_list, list) {
		if (box->phys_id == topology_physical_package_id(cpu)) {
			atomic_inc(&box->refcnt);
			*per_cpu_ptr(pmu->box, cpu) = box;
			break;
		}
	}
out:
	raw_spin_unlock(&uncore_box_lock);

	return *per_cpu_ptr(pmu->box, cpu);
}

struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	/*
	 * perf core schedules events on the basis of cpu; uncore events are
	 * collected by one of the cpus inside a physical package.
	 */
	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events that do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

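/*
 * Read the current counter value and accumulate the delta since the last
 * read into event->count. The shift pair truncates the difference to the
 * hardware counter width so wraparound is handled, and the local64_xchg
 * on prev_count keeps the update safe against the hrtimer poll.
 */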
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken for
 * SandyBridge, so we use a hrtimer to periodically poll the counter and
 * avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

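/*
 * Allocate and initialize a box (and its trailing array of shared extra
 * registers) on the given NUMA node. The box starts with a refcount of 1
 * and no owning cpu or package assigned.
 */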
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
{
	struct intel_uncore_box *box;
	int i, size;

	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < type->num_shared_regs; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	atomic_set(&box->refcnt, 1);
	box->cpu = -1;
	box->phys_id = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Use the uncore_pmu_event_init() pmu event_init callback as a detection
 * point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}

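/*
 * Collect the leader (and, if @dogrp is set, its siblings that belong to
 * this uncore PMU and are not in the OFF state) into box->event_list.
 * Returns the new event count, or -EINVAL if the box would exceed its
 * counter capacity.
 */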
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

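/*
 * Assign hardware counters to the collected events: a fast path first tries
 * to keep each event on its previous counter, and only if that fails does
 * the generic constraint solver (perf_assign_events) run. Constraints are
 * released again when no assignment is wanted or the scheduling fails.
 */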
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

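/*
 * pmu::add callback: collect the new event into the box, recompute the
 * counter assignment, stop events that have to move, then (re)program the
 * moved events on their new counters and start those that are allowed to
 * run.
 */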
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			while (++i < box->n_events)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its siblings, therefore
	 * we must first collect the existing siblings and then add the
	 * new event before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

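/*
 * Register one uncore PMU with the perf core. Types without a template pmu
 * get the generic callbacks above; the PMU name is "uncore", "uncore_<type>"
 * or "uncore_<type>_<idx>" depending on the type name and box count.
 */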
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	return ret;
}

static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	int i;

	for (i = 0; i < type->num_boxes; i++)
		free_percpu(type->pmus[i].box);
	kfree(type->pmus);
	type->pmus = NULL;
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	int i;
	for (i = 0; types[i]; i++)
		uncore_type_exit(types[i]);
}

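/*
 * Allocate the per-type state: one intel_uncore_pmu (with a per-cpu box
 * pointer) per box, the default "unconstrainted" event constraint and, if
 * the type provides event descriptions, the sysfs "events" attribute group.
 */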
static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	type->pmus = pmus;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			goto fail;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			goto fail;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
fail:
	uncore_type_exit(type);
	return -ENOMEM;
}

static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		uncore_type_exit(types[i]);
	return ret;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct intel_uncore_type *type;
	int phys_id;
	bool first_box = false;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
		uncore_extra_pci_dev[phys_id][idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	/*
	 * for a performance monitoring unit with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	/*
	 * Knights Landing uses a common PCI device ID for multiple instances
	 * of an uncore PMU device type. There is only one entry per device
	 * type in the knl_uncore_pci_ids table in spite of multiple devices
	 * being present for some device types, so the PCI device idx is 0 for
	 * all devices. Hence, advance the pmu pointer to an unused array
	 * element.
	 */
	if (boot_cpu_data.x86_model == 87)
		while (pmu->func_id >= 0)
			pmu++;
	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	if (list_empty(&pmu->box_list))
		first_box = true;
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	if (first_box)
		uncore_pmu_register(pmu);
	return 0;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, cpu, phys_id;
	bool last_box = false;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[phys_id][i] == pdev) {
				uncore_extra_pci_dev[phys_id][i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	pci_set_drvdata(pdev, NULL);

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	if (list_empty(&pmu->box_list))
		last_box = true;
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);

	if (last_box)
		perf_pmu_unregister(&pmu->pmu);
}

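/*
 * Model-specific setup of the PCI uncore devices: pick the uncore type table
 * for the running CPU model, initialize the types and register the PCI
 * driver whose probe/remove callbacks are the functions above.
 */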
static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_uncore_pci_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ret = ivbep_uncore_pci_init();
		break;
	case 63: /* Haswell-EP */
		ret = hswep_uncore_pci_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		ret = bdx_uncore_pci_init();
		break;
	case 42: /* Sandy Bridge */
		ret = snb_uncore_pci_init();
		break;
	case 58: /* Ivy Bridge */
		ret = ivb_uncore_pci_init();
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = hsw_uncore_pci_init();
		break;
	case 61: /* Broadwell */
		ret = bdw_uncore_pci_init();
		break;
	case 87: /* Knights Landing */
		ret = knl_uncore_pci_init();
		break;
	default:
		return 0;
	}

	if (ret)
		return ret;

	ret = uncore_types_init(uncore_pci_uncores);
	if (ret)
		return ret;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret == 0)
		pcidrv_registered = true;
	else
		uncore_types_exit(uncore_pci_uncores);

	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
	}
}

/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);

static void uncore_kfree_boxes(void)
{
	struct intel_uncore_box *box;

	while (!list_empty(&boxes_to_free)) {
		box = list_entry(boxes_to_free.next,
				 struct intel_uncore_box, list);
		list_del(&box->list);
		kfree(box);
	}
}

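/*
 * CPU hotplug handlers for the MSR based uncore PMUs. Boxes are allocated
 * in the prepare stage, initialized (or shared with an existing box on the
 * same package) in the starting stage, and queued on boxes_to_free once the
 * last cpu referencing a box goes away.
 */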
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				list_add(&box->list, &boxes_to_free);
		}
	}
}

static int uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					if (box) {
						list_add(&box->list,
							 &boxes_to_free);
						box = NULL;
					}
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}

static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}

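/*
 * Move the uncore context of every box of the given types from @old_cpu to
 * @new_cpu (either may be -1), migrating the perf events so that the new
 * cpu becomes the one that reads the package's counters.
 */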
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
						old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}

static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}

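/*
 * Hotplug notifier: the first switch manages box allocation and freeing,
 * the second selects which online cpu collects the uncore events of a
 * package.
 */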
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}

static int __init uncore_cpu_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		nhm_uncore_cpu_init();
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
	case 60: /* Haswell */
	case 69: /* Haswell */
	case 70: /* Haswell */
	case 61: /* Broadwell */
	case 71: /* Broadwell */
		snb_uncore_cpu_init();
		break;
	case 45: /* Sandy Bridge-EP */
		snbep_uncore_cpu_init();
		break;
	case 46: /* Nehalem-EX */
	case 47: /* Westmere-EX aka. Xeon E7 */
		nhmex_uncore_cpu_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ivbep_uncore_cpu_init();
		break;
	case 63: /* Haswell-EP */
		hswep_uncore_cpu_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		bdx_uncore_cpu_init();
		break;
	case 87: /* Knights Landing */
		knl_uncore_cpu_init();
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(uncore_msr_uncores);
	if (ret)
		return ret;

	return 0;
}

static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}

static void __init uncore_cpumask_init(void)
{
	int cpu;

	/*
	 * only invoked once, from either the msr or the pci init code
	 */
	if (!cpumask_empty(&uncore_cpu_mask))
		return;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	__register_cpu_notifier(&uncore_cpu_nb);

	cpu_notifier_register_done();
}


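/*
 * Module entry point: bail out on non-Intel CPUs and under hypervisors,
 * then bring up the PCI and MSR uncore PMUs and register them with perf.
 */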
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}
	uncore_cpumask_init();

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);