#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
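/*
 * Note: empty_uncore is a NULL-terminated sentinel table, so generic code
 * can always walk "for (i = 0; uncores[i]; i++)" even before (or if never)
 * a model specific init routine installs a real table.
 */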
static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);
int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}
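/*
 * Look up (or lazily create) the per-segment bus->physid map. The caller
 * holds pci2phy_map_lock, but kmalloc(GFP_KERNEL) may sleep, so the lock
 * is dropped around the allocation and the list is re-scanned afterwards:
 * another CPU may have inserted the same segment meanwhile, in which case
 * the losing allocation is simply kfree'd at "end".
 */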
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}
ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}
struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}
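/*
 * Fast path: the per-cpu box pointer is read locklessly. On a miss we
 * take uncore_box_lock, recheck (another CPU may have won the race) and
 * then bind the box belonging to this cpu's physical package.
 */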
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	struct intel_uncore_box *box;

	box = *per_cpu_ptr(pmu->box, cpu);
	if (box)
		return box;

	raw_spin_lock(&uncore_box_lock);
	/* Recheck in lock to handle races. */
	if (*per_cpu_ptr(pmu->box, cpu))
		goto out;
	list_for_each_entry(box, &pmu->box_list, list) {
		if (box->phys_id == topology_physical_package_id(cpu)) {
			atomic_inc(&box->refcnt);
			*per_cpu_ptr(pmu->box, cpu) = box;
			break;
		}
	}
out:
	raw_spin_unlock(&uncore_box_lock);

	return *per_cpu_ptr(pmu->box, cpu);
}
struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	/*
	 * perf core schedules events on the basis of cpu; uncore events are
	 * collected by one of the cpus inside a physical package.
	 */
	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}
/*
 * Generic get-constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for a fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
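/*
 * Counters are narrower than 64 bit. Shifting both snapshots left by
 * (64 - width) and subtracting makes the delta wrap correctly at the
 * counter width; shifting back down yields the unsigned delta. E.g. for
 * a (hypothetical) 48-bit counter, prev = 0xffffffffffff and new = 1
 * give delta = 2 rather than a huge bogus value.
 */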
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
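/*
 * A rough, illustrative bound (numbers assumed, not taken from this file):
 * a 48-bit counter incrementing once per cycle at 4 GHz wraps after
 * 2^48 / 4e9 ~= 70000 s, but narrower counters or events that tick faster
 * than the clock wrap much sooner, so the poll below must run well within
 * the worst-case wrap interval.
 */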
/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use an hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
{
	struct intel_uncore_box *box;
	int i, size;

	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < type->num_shared_regs; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	atomic_set(&box->refcnt, 1);
	box->cpu = -1;
	box->phys_id = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}
/*
 * Use the uncore_pmu_event_init() pmu event_init callback as a detection
 * point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}
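/*
 * Constraint lookup order: a model specific get_constraint hook first,
 * then the fixed-counter constraint for UNCORE_FIXED_EVENT, then the
 * static per-type constraint table, and finally the type's fully
 * unconstrained fallback.
 */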
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}
static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
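/*
 * Assign counters to the n collected events. First gather per-event
 * constraints and the min/max constraint weights, then try the fast path
 * of keeping each event on its previously assigned counter; if any event
 * cannot keep its counter, fall back to the generic perf_assign_events()
 * solver. With assign == NULL this only checks schedulability, which is
 * what group validation below relies on.
 */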
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}
static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
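/*
 * pmu::add: collect the new event into the box, recompute the counter
 * assignment, stop any already-running events whose counters changed,
 * then (re)program and restart everything that is allowed to run.
 */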
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}
static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			while (++i < box->n_events)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}
void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}
/*
 * Validation ensures the group can be loaded onto the
 * PMU if it were the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its siblings,
	 * therefore we must first collect existing siblings,
	 * then add the new event before we can simulate the
	 * scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
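/*
 * Userspace reaches this PMU through the dynamic type published under
 * /sys/bus/event_source/devices/uncore_*. A minimal consumer sketch
 * (illustrative only, not part of this file):
 *
 *	struct perf_event_attr attr = {
 *		.type	= <type id read from sysfs>,
 *		.config	= <event code and umask>,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 * The cpu argument must be >= 0; event_init() below rejects per-task
 * events, sampling, and all exclude_* bits.
 */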
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU always measures at all privilege levels, so it
	 * doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	return ret;
}
static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	int i;

	if (type->pmus) {
		for (i = 0; i < type->num_boxes; i++)
			free_percpu(type->pmus[i].box);
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	int i;

	for (i = 0; types[i]; i++)
		uncore_type_exit(types[i]);
}
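/*
 * Per-type one-time setup: allocate the per-box pmu array, derive the
 * "unconstrained" constraint from num_counters, and build the "events"
 * sysfs attribute group. Note the single kzalloc below sizes the group
 * struct and its NULL-terminated attribute pointer array together; the
 * array lives directly behind the struct (attr_group + 1).
 */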
static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	type->pmus = pmus;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			return -ENOMEM;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}
static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct intel_uncore_type *type;
	int phys_id;
	bool first_box = false;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
		uncore_extra_pci_dev[phys_id][idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	/*
	 * for performance monitoring units with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	/*
	 * Knights Landing uses a common PCI device ID for multiple instances
	 * of an uncore PMU device type. There is only one entry per device
	 * type in the knl_uncore_pci_ids table in spite of multiple devices
	 * being present for some device types, so the PCI device idx is 0
	 * for all devices. Hence increment the pmu pointer to point to an
	 * unused array element.
	 */
	if (boot_cpu_data.x86_model == 87)
		while (pmu->func_id >= 0)
			pmu++;
	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	if (list_empty(&pmu->box_list))
		first_box = true;
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	if (first_box)
		uncore_pmu_register(pmu);
	return 0;
}
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, cpu, phys_id;
	bool last_box = false;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[phys_id][i] == pdev) {
				uncore_extra_pci_dev[phys_id][i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	pci_set_drvdata(pdev, NULL);

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	if (list_empty(&pmu->box_list))
		last_box = true;
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);

	if (last_box)
		perf_pmu_unregister(&pmu->pmu);
}
static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_uncore_pci_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ret = ivbep_uncore_pci_init();
		break;
	case 63: /* Haswell-EP */
		ret = hswep_uncore_pci_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		ret = bdx_uncore_pci_init();
		break;
	case 42: /* Sandy Bridge */
		ret = snb_uncore_pci_init();
		break;
	case 58: /* Ivy Bridge */
		ret = ivb_uncore_pci_init();
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = hsw_uncore_pci_init();
		break;
	case 61: /* Broadwell */
		ret = bdw_uncore_pci_init();
		break;
	case 87: /* Knights Landing */
		ret = knl_uncore_pci_init();
		break;
	case 94: /* SkyLake */
		ret = skl_uncore_pci_init();
		break;
	default:
		return 0;
	}

	if (ret)
		return ret;

	ret = uncore_types_init(uncore_pci_uncores);
	if (ret)
		goto err;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto err;

	pcidrv_registered = true;
	return 0;

err:
	uncore_types_exit(uncore_pci_uncores);
	uncore_pci_uncores = empty_uncore;
	return ret;
}
static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
	}
}
/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);
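/*
 * kfree() cannot be called from the atomic CPU_STARTING / CPU_DYING
 * callbacks below, so boxes to be released are parked on boxes_to_free
 * and actually freed later, from the CPU_ONLINE / CPU_DEAD notifications.
 */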
static void uncore_kfree_boxes(void)
{
	struct intel_uncore_box *box;

	while (!list_empty(&boxes_to_free)) {
		box = list_entry(boxes_to_free.next,
				 struct intel_uncore_box, list);
		list_del(&box->list);
		kfree(box);
	}
}
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				list_add(&box->list, &boxes_to_free);
		}
	}
}
static int uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					if (box) {
						list_add(&box->list,
							 &boxes_to_free);
						box = NULL;
					}
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}
static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}
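/*
 * Move the uncore events of each box from old_cpu to new_cpu (one of the
 * two may be -1 when a package gains or loses its collector cpu). With
 * old_cpu < 0 the box is simply claimed for new_cpu; otherwise the box
 * hrtimer is stopped on the outgoing cpu and the perf context migrated
 * via perf_pmu_migrate_context().
 */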
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
							 old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}
static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}
static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};
static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}
static int __init uncore_cpu_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		nhm_uncore_cpu_init();
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
	case 60: /* Haswell */
	case 69: /* Haswell */
	case 70: /* Haswell */
	case 61: /* Broadwell */
	case 71: /* Broadwell */
		snb_uncore_cpu_init();
		break;
	case 45: /* Sandy Bridge-EP */
		snbep_uncore_cpu_init();
		break;
	case 46: /* Nehalem-EX */
	case 47: /* Westmere-EX aka. Xeon E7 */
		nhmex_uncore_cpu_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ivbep_uncore_cpu_init();
		break;
	case 63: /* Haswell-EP */
		hswep_uncore_cpu_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		bdx_uncore_cpu_init();
		break;
	case 87: /* Knights Landing */
		knl_uncore_cpu_init();
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(uncore_msr_uncores);
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}
static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; uncore_msr_uncores[i]; i++) {
		type = uncore_msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}
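/*
 * Pick one online cpu per package as the initial event collector and set
 * up its boxes. This runs between cpu_notifier_register_begin/done so
 * that no hotplug operation can slip in before the notifier is in place.
 */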
static void __init uncore_cpumask_init(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	__register_cpu_notifier(&uncore_cpu_nb);

	cpu_notifier_register_done();
}
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}
	uncore_cpumask_init();

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);