1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9static DEFINE_RAW_SPINLOCK(uncore_box_lock);
10
11/* mask of cpus that collect uncore events */
12static cpumask_t uncore_cpu_mask;
13
14/* constraint for the fixed counter */
15static struct event_constraint constraint_fixed =
16 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
17static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0);
19
20DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
22DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
23DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
24DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
25DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
26DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
27DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
28DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
29DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
30DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
31DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
32DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
33DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
34DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
35DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
36DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
37DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
38DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
39DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
40DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
41
42static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
43{
44 u64 count;
45
46 rdmsrl(event->hw.event_base, count);
47
48 return count;
49}
50
51/*
52 * generic get constraint function for shared match/mask registers.
53 */
54static struct event_constraint *
55uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
56{
57 struct intel_uncore_extra_reg *er;
58 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
59 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
60 unsigned long flags;
61 bool ok = false;
62
63 /*
64 * reg->alloc can be set due to existing state, so for a fake box we
65 * need to ignore this, otherwise we might fail to allocate proper
66 * fake state for this extra reg constraint.
67 */
68 if (reg1->idx == EXTRA_REG_NONE ||
69 (!uncore_box_is_fake(box) && reg1->alloc))
70 return NULL;
71
72 er = &box->shared_regs[reg1->idx];
73 raw_spin_lock_irqsave(&er->lock, flags);
74 if (!atomic_read(&er->ref) ||
75 (er->config1 == reg1->config && er->config2 == reg2->config)) {
76 atomic_inc(&er->ref);
77 er->config1 = reg1->config;
78 er->config2 = reg2->config;
79 ok = true;
80 }
81 raw_spin_unlock_irqrestore(&er->lock, flags);
82
83 if (ok) {
84 if (!uncore_box_is_fake(box))
85 reg1->alloc = 1;
86 return NULL;
87 }
88
89 return &constraint_empty;
90}
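/*
 * Illustration (hypothetical scenario, values not from the driver): the
 * first event that reaches uncore_get_constraint() for a given shared
 * register finds er->ref == 0, stores its reg1->config/reg2->config and
 * takes a reference.  A later event with an identical match/mask setting
 * only bumps er->ref; one with a different setting gets &constraint_empty
 * back and cannot be scheduled until uncore_put_constraint() drops the
 * reference.
 */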
91
92static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
93{
94 struct intel_uncore_extra_reg *er;
95 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
96
97 /*
98 * Only put constraint if extra reg was actually allocated. Also
99 * takes care of events which do not use an extra shared reg.
100 *
101 * Also, if this is a fake box we shouldn't touch any event state
102 * (reg->alloc) and we don't care about leaving inconsistent box
103 * state either since it will be thrown out.
104 */
105 if (uncore_box_is_fake(box) || !reg1->alloc)
106 return;
107
108 er = &box->shared_regs[reg1->idx];
109 atomic_dec(&er->ref);
110 reg1->alloc = 0;
111}
112
113/* Sandy Bridge-EP uncore support */
114static struct intel_uncore_type snbep_uncore_cbox;
115static struct intel_uncore_type snbep_uncore_pcu;
116
117static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
118{
119 struct pci_dev *pdev = box->pci_dev;
120 int box_ctl = uncore_pci_box_ctl(box);
121 u32 config;
122
123 pci_read_config_dword(pdev, box_ctl, &config);
124 config |= SNBEP_PMON_BOX_CTL_FRZ;
125 pci_write_config_dword(pdev, box_ctl, config);
126}
127
128static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
129{
130 struct pci_dev *pdev = box->pci_dev;
131 int box_ctl = uncore_pci_box_ctl(box);
132 u32 config;
133
134 pci_read_config_dword(pdev, box_ctl, &config);
135 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
136 pci_write_config_dword(pdev, box_ctl, config);
137}
138
139static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
140{
141 struct pci_dev *pdev = box->pci_dev;
142 struct hw_perf_event *hwc = &event->hw;
143
144 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
145}
146
147static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
148{
149 struct pci_dev *pdev = box->pci_dev;
150 struct hw_perf_event *hwc = &event->hw;
151
152 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
153}
154
155static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
156{
157 struct pci_dev *pdev = box->pci_dev;
158 struct hw_perf_event *hwc = &event->hw;
159 u64 count;
160
161 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
162 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
163
164 return count;
165}
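/*
 * Note on the read above: PCI config space is accessed in 32-bit chunks,
 * so the 48-bit counter is assembled from two dword reads.  With made-up
 * values, a low dword of 0x12345678 and a high dword of 0x00000009 give
 * count = 0x0000000912345678, since (u32 *)&count + 1 addresses the upper
 * half of the little-endian u64 on x86.
 */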
166
167static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
168{
169 struct pci_dev *pdev = box->pci_dev;
170
171 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
172}
173
174static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
175{
176 u64 config;
177 unsigned msr;
178
179 msr = uncore_msr_box_ctl(box);
180 if (msr) {
181 rdmsrl(msr, config);
182 config |= SNBEP_PMON_BOX_CTL_FRZ;
183 wrmsrl(msr, config);
184 }
185}
186
187static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
188{
189 u64 config;
190 unsigned msr;
191
192 msr = uncore_msr_box_ctl(box);
193 if (msr) {
194 rdmsrl(msr, config);
195 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
196 wrmsrl(msr, config);
197 }
198}
199
200static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
201{
202 struct hw_perf_event *hwc = &event->hw;
203 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
204
205 if (reg1->idx != EXTRA_REG_NONE)
206 wrmsrl(reg1->reg, reg1->config);
207
208 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
209}
210
211static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
212 struct perf_event *event)
213{
214 struct hw_perf_event *hwc = &event->hw;
215
216 wrmsrl(hwc->config_base, hwc->config);
217}
218
219static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
220{
221 unsigned msr = uncore_msr_box_ctl(box);
222
223 if (msr)
224 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
225}
226
227static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
228{
229 struct hw_perf_event *hwc = &event->hw;
230 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
231
232 if (box->pmu->type == &snbep_uncore_cbox) {
233 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
234 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
235 reg1->config = event->attr.config1 &
236 SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
237 } else {
238 if (box->pmu->type == &snbep_uncore_pcu) {
239 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
240 reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
241 } else {
242 return 0;
243 }
244 }
245 reg1->idx = 0;
246
247 return 0;
248}
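/*
 * Sketch of the result of the hw_config above for a cbox event (the box
 * index is an arbitrary example): with pmu_idx == 2, reg1->reg points at
 * the cbox-2 filter register, SNBEP_C0_MSR_PMON_BOX_FILTER +
 * 2 * SNBEP_CBO_MSR_OFFSET, and reg1->config keeps only the attr.config1
 * bits covered by SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK.
 */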
249
250static struct attribute *snbep_uncore_formats_attr[] = {
251 &format_attr_event.attr,
252 &format_attr_umask.attr,
253 &format_attr_edge.attr,
254 &format_attr_inv.attr,
255 &format_attr_thresh8.attr,
256 NULL,
257};
258
259static struct attribute *snbep_uncore_ubox_formats_attr[] = {
260 &format_attr_event.attr,
261 &format_attr_umask.attr,
262 &format_attr_edge.attr,
263 &format_attr_inv.attr,
264 &format_attr_thresh5.attr,
265 NULL,
266};
267
268static struct attribute *snbep_uncore_cbox_formats_attr[] = {
269 &format_attr_event.attr,
270 &format_attr_umask.attr,
271 &format_attr_edge.attr,
272 &format_attr_tid_en.attr,
273 &format_attr_inv.attr,
274 &format_attr_thresh8.attr,
275 &format_attr_filter_tid.attr,
276 &format_attr_filter_nid.attr,
277 &format_attr_filter_state.attr,
278 &format_attr_filter_opc.attr,
279 NULL,
280};
281
282static struct attribute *snbep_uncore_pcu_formats_attr[] = {
283 &format_attr_event.attr,
284 &format_attr_occ_sel.attr,
285 &format_attr_edge.attr,
286 &format_attr_inv.attr,
287 &format_attr_thresh5.attr,
288 &format_attr_occ_invert.attr,
289 &format_attr_occ_edge.attr,
290 &format_attr_filter_band0.attr,
291 &format_attr_filter_band1.attr,
292 &format_attr_filter_band2.attr,
293 &format_attr_filter_band3.attr,
294 NULL,
295};
296
297static struct attribute *snbep_uncore_qpi_formats_attr[] = {
298 &format_attr_event_ext.attr,
299 &format_attr_umask.attr,
300 &format_attr_edge.attr,
301 &format_attr_inv.attr,
302 &format_attr_thresh8.attr,
303 NULL,
304};
305
306static struct uncore_event_desc snbep_uncore_imc_events[] = {
307 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
308 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
309 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
310 { /* end: all zeroes */ },
311};
312
313static struct uncore_event_desc snbep_uncore_qpi_events[] = {
314 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
315 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
316 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
317 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
318 { /* end: all zeroes */ },
319};
320
321static struct attribute_group snbep_uncore_format_group = {
322 .name = "format",
323 .attrs = snbep_uncore_formats_attr,
324};
325
326static struct attribute_group snbep_uncore_ubox_format_group = {
327 .name = "format",
328 .attrs = snbep_uncore_ubox_formats_attr,
329};
330
331static struct attribute_group snbep_uncore_cbox_format_group = {
332 .name = "format",
333 .attrs = snbep_uncore_cbox_formats_attr,
334};
335
336static struct attribute_group snbep_uncore_pcu_format_group = {
337 .name = "format",
338 .attrs = snbep_uncore_pcu_formats_attr,
339};
340
341static struct attribute_group snbep_uncore_qpi_format_group = {
342 .name = "format",
343 .attrs = snbep_uncore_qpi_formats_attr,
344};
345
346static struct intel_uncore_ops snbep_uncore_msr_ops = {
347 .init_box = snbep_uncore_msr_init_box,
348 .disable_box = snbep_uncore_msr_disable_box,
349 .enable_box = snbep_uncore_msr_enable_box,
350 .disable_event = snbep_uncore_msr_disable_event,
351 .enable_event = snbep_uncore_msr_enable_event,
352 .read_counter = uncore_msr_read_counter,
353 .get_constraint = uncore_get_constraint,
354 .put_constraint = uncore_put_constraint,
355 .hw_config = snbep_uncore_hw_config,
356};
357
358static struct intel_uncore_ops snbep_uncore_pci_ops = {
359 .init_box = snbep_uncore_pci_init_box,
360 .disable_box = snbep_uncore_pci_disable_box,
361 .enable_box = snbep_uncore_pci_enable_box,
362 .disable_event = snbep_uncore_pci_disable_event,
363 .enable_event = snbep_uncore_pci_enable_event,
364 .read_counter = snbep_uncore_pci_read_counter,
365};
366
367static struct event_constraint snbep_uncore_cbox_constraints[] = {
368 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
369 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
370 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
371 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
372 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
374 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
375 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
376 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
377 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
378 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
379 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
380 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
381 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
382 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
383 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
384 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
385 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
386 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
387 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
388 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
389 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
390 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
391 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
392 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
393 EVENT_CONSTRAINT_END
394};
395
396static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
397 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
398 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
399 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
400 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
401 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
402 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
403 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
404 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
405 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
406 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
407 EVENT_CONSTRAINT_END
408};
409
410static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
411 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
412 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
413 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
414 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
415 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
416 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
417 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
418 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
419 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
420 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
421 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
422 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
423 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
429 EVENT_CONSTRAINT_END
430};
431
432static struct intel_uncore_type snbep_uncore_ubox = {
433 .name = "ubox",
434 .num_counters = 2,
435 .num_boxes = 1,
436 .perf_ctr_bits = 44,
437 .fixed_ctr_bits = 48,
438 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
439 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
440 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
441 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
442 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
443 .ops = &snbep_uncore_msr_ops,
444 .format_group = &snbep_uncore_ubox_format_group,
445};
446
447static struct intel_uncore_type snbep_uncore_cbox = {
448 .name = "cbox",
449 .num_counters = 4,
450 .num_boxes = 8,
451 .perf_ctr_bits = 44,
452 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
453 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
454 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
455 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
456 .msr_offset = SNBEP_CBO_MSR_OFFSET,
457 .num_shared_regs = 1,
458 .constraints = snbep_uncore_cbox_constraints,
459 .ops = &snbep_uncore_msr_ops,
460 .format_group = &snbep_uncore_cbox_format_group,
461};
462
463static struct intel_uncore_type snbep_uncore_pcu = {
464 .name = "pcu",
465 .num_counters = 4,
466 .num_boxes = 1,
467 .perf_ctr_bits = 48,
468 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
469 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
470 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
471 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
472 .num_shared_regs = 1,
473 .ops = &snbep_uncore_msr_ops,
474 .format_group = &snbep_uncore_pcu_format_group,
475};
476
477static struct intel_uncore_type *snbep_msr_uncores[] = {
478 &snbep_uncore_ubox,
479 &snbep_uncore_cbox,
480 &snbep_uncore_pcu,
481 NULL,
482};
483
484#define SNBEP_UNCORE_PCI_COMMON_INIT() \
485 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
486 .event_ctl = SNBEP_PCI_PMON_CTL0, \
487 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
488 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
489 .ops = &snbep_uncore_pci_ops, \
490 .format_group = &snbep_uncore_format_group
491
492static struct intel_uncore_type snbep_uncore_ha = {
493 .name = "ha",
494 .num_counters = 4,
495 .num_boxes = 1,
496 .perf_ctr_bits = 48,
497 SNBEP_UNCORE_PCI_COMMON_INIT(),
498};
499
500static struct intel_uncore_type snbep_uncore_imc = {
501 .name = "imc",
502 .num_counters = 4,
503 .num_boxes = 4,
504 .perf_ctr_bits = 48,
505 .fixed_ctr_bits = 48,
506 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
507 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
508 .event_descs = snbep_uncore_imc_events,
509 SNBEP_UNCORE_PCI_COMMON_INIT(),
510};
511
512static struct intel_uncore_type snbep_uncore_qpi = {
513 .name = "qpi",
514 .num_counters = 4,
515 .num_boxes = 2,
516 .perf_ctr_bits = 48,
517 .perf_ctr = SNBEP_PCI_PMON_CTR0,
518 .event_ctl = SNBEP_PCI_PMON_CTL0,
519 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
520 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
521 .ops = &snbep_uncore_pci_ops,
522 .event_descs = snbep_uncore_qpi_events,
523 .format_group = &snbep_uncore_qpi_format_group,
524};
525
526
527static struct intel_uncore_type snbep_uncore_r2pcie = {
528 .name = "r2pcie",
529 .num_counters = 4,
530 .num_boxes = 1,
531 .perf_ctr_bits = 44,
532 .constraints = snbep_uncore_r2pcie_constraints,
533 SNBEP_UNCORE_PCI_COMMON_INIT(),
534};
535
536static struct intel_uncore_type snbep_uncore_r3qpi = {
537 .name = "r3qpi",
538 .num_counters = 3,
539 .num_boxes = 2,
540 .perf_ctr_bits = 44,
541 .constraints = snbep_uncore_r3qpi_constraints,
542 SNBEP_UNCORE_PCI_COMMON_INIT(),
543};
544
545static struct intel_uncore_type *snbep_pci_uncores[] = {
546 &snbep_uncore_ha,
547 &snbep_uncore_imc,
548 &snbep_uncore_qpi,
549 &snbep_uncore_r2pcie,
550 &snbep_uncore_r3qpi,
551 NULL,
552};
553
554static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
555 { /* Home Agent */
556 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
557 .driver_data = (unsigned long)&snbep_uncore_ha,
558 },
559 { /* MC Channel 0 */
560 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
561 .driver_data = (unsigned long)&snbep_uncore_imc,
562 },
563 { /* MC Channel 1 */
564 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
565 .driver_data = (unsigned long)&snbep_uncore_imc,
566 },
567 { /* MC Channel 2 */
568 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
569 .driver_data = (unsigned long)&snbep_uncore_imc,
570 },
571 { /* MC Channel 3 */
572 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
573 .driver_data = (unsigned long)&snbep_uncore_imc,
574 },
575 { /* QPI Port 0 */
576 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
577 .driver_data = (unsigned long)&snbep_uncore_qpi,
578 },
579 { /* QPI Port 1 */
580 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
581 .driver_data = (unsigned long)&snbep_uncore_qpi,
582 },
583 { /* P2PCIe */
584 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
585 .driver_data = (unsigned long)&snbep_uncore_r2pcie,
586 },
587 { /* R3QPI Link 0 */
588 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
589 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
590 },
591 { /* R3QPI Link 1 */
592 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
593 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
594 },
595 { /* end: all zeroes */ }
596};
597
598static struct pci_driver snbep_uncore_pci_driver = {
599 .name = "snbep_uncore",
600 .id_table = snbep_uncore_pci_ids,
601};
602
603/*
604 * build pci bus to socket mapping
605 */
606static void snbep_pci2phy_map_init(void)
607{
608 struct pci_dev *ubox_dev = NULL;
609 int i, bus, nodeid;
610 u32 config;
611
612 while (1) {
613 /* find the UBOX device */
614 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
615 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
616 ubox_dev);
617 if (!ubox_dev)
618 break;
619 bus = ubox_dev->bus->number;
620 /* get the Node ID of the local register */
621 pci_read_config_dword(ubox_dev, 0x40, &config);
622 nodeid = config;
623 /* get the Node ID mapping */
624 pci_read_config_dword(ubox_dev, 0x54, &config);
625 /*
626 * each group of three bits in the Node ID mapping register
627 * maps to a particular node.
628 */
629 for (i = 0; i < 8; i++) {
630 if (nodeid == ((config >> (3 * i)) & 0x7)) {
631 pcibus_to_physid[bus] = i;
632 break;
633 }
634 }
635 };
636 return;
637}
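/*
 * Worked example for the mapping loop above (register values are made up):
 * if the local Node ID register at 0x40 reads nodeid = 3 and the mapping
 * register at 0x54 reads 0x00fac688 (3-bit field i holding the value i),
 * the loop matches at i = 3 and records pcibus_to_physid[bus] = 3.
 */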
638/* end of Sandy Bridge-EP uncore support */
639
640/* Sandy Bridge uncore support */
641static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
642{
643 struct hw_perf_event *hwc = &event->hw;
644
645 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
646 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
647 else
648 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
649}
650
651static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
652{
653 wrmsrl(event->hw.config_base, 0);
654}
655
656static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
657{
658 if (box->pmu->pmu_idx == 0) {
659 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
660 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
661 }
662}
663
664static struct attribute *snb_uncore_formats_attr[] = {
665 &format_attr_event.attr,
666 &format_attr_umask.attr,
667 &format_attr_edge.attr,
668 &format_attr_inv.attr,
669 &format_attr_cmask5.attr,
670 NULL,
671};
672
673static struct attribute_group snb_uncore_format_group = {
674 .name = "format",
675 .attrs = snb_uncore_formats_attr,
676};
677
678static struct intel_uncore_ops snb_uncore_msr_ops = {
679 .init_box = snb_uncore_msr_init_box,
680 .disable_event = snb_uncore_msr_disable_event,
681 .enable_event = snb_uncore_msr_enable_event,
682 .read_counter = uncore_msr_read_counter,
683};
684
685static struct event_constraint snb_uncore_cbox_constraints[] = {
686 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
687 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
688 EVENT_CONSTRAINT_END
689};
690
691static struct intel_uncore_type snb_uncore_cbox = {
692 .name = "cbox",
693 .num_counters = 2,
694 .num_boxes = 4,
695 .perf_ctr_bits = 44,
696 .fixed_ctr_bits = 48,
697 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
698 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
699 .fixed_ctr = SNB_UNC_FIXED_CTR,
700 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
701 .single_fixed = 1,
702 .event_mask = SNB_UNC_RAW_EVENT_MASK,
703 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
704 .constraints = snb_uncore_cbox_constraints,
705 .ops = &snb_uncore_msr_ops,
706 .format_group = &snb_uncore_format_group,
707};
708
709static struct intel_uncore_type *snb_msr_uncores[] = {
710 &snb_uncore_cbox,
711 NULL,
712};
713/* end of Sandy Bridge uncore support */
714
715/* Nehalem uncore support */
716static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
717{
718 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
719}
720
721static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
722{
723 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
724}
725
726static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
727{
728 struct hw_perf_event *hwc = &event->hw;
729
730 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
731 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
732 else
733 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
734}
735
736static struct attribute *nhm_uncore_formats_attr[] = {
737 &format_attr_event.attr,
738 &format_attr_umask.attr,
739 &format_attr_edge.attr,
740 &format_attr_inv.attr,
741 &format_attr_cmask8.attr,
742 NULL,
743};
744
745static struct attribute_group nhm_uncore_format_group = {
746 .name = "format",
747 .attrs = nhm_uncore_formats_attr,
748};
749
750static struct uncore_event_desc nhm_uncore_events[] = {
751 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
752 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
753 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
754 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
755 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
756 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
757 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
758 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
759 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
760 { /* end: all zeroes */ },
761};
762
763static struct intel_uncore_ops nhm_uncore_msr_ops = {
764 .disable_box = nhm_uncore_msr_disable_box,
765 .enable_box = nhm_uncore_msr_enable_box,
766 .disable_event = snb_uncore_msr_disable_event,
767 .enable_event = nhm_uncore_msr_enable_event,
768 .read_counter = uncore_msr_read_counter,
769};
770
771static struct intel_uncore_type nhm_uncore = {
772 .name = "",
773 .num_counters = 8,
774 .num_boxes = 1,
775 .perf_ctr_bits = 48,
776 .fixed_ctr_bits = 48,
777 .event_ctl = NHM_UNC_PERFEVTSEL0,
778 .perf_ctr = NHM_UNC_UNCORE_PMC0,
779 .fixed_ctr = NHM_UNC_FIXED_CTR,
780 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
781 .event_mask = NHM_UNC_RAW_EVENT_MASK,
782 .event_descs = nhm_uncore_events,
783 .ops = &nhm_uncore_msr_ops,
784 .format_group = &nhm_uncore_format_group,
785};
786
787static struct intel_uncore_type *nhm_msr_uncores[] = {
788 &nhm_uncore,
789 NULL,
790};
791/* end of Nehalem uncore support */
792
793/* Nehalem-EX uncore support */
794#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
795 ((1ULL << (n)) - 1)))
796
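/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x, e.g.
 * __BITS_VALUE(0x12345678, 1, 16) == 0x1234 and
 * __BITS_VALUE(0x12345678, 0, 16) == 0x5678.  It is used below wherever
 * several small indices or MSR addresses are packed into one integer.
 */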
797DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
798DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
799DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
800DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
801DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
802
803static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
804{
805 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
806}
807
808static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
809{
810 unsigned msr = uncore_msr_box_ctl(box);
811 u64 config;
812
813 if (msr) {
814 rdmsrl(msr, config);
815 config &= ~((1ULL << uncore_num_counters(box)) - 1);
816 /* WBox has a fixed counter */
817 if (uncore_msr_fixed_ctl(box))
818 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
819 wrmsrl(msr, config);
820 }
821}
822
823static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
824{
825 unsigned msr = uncore_msr_box_ctl(box);
826 u64 config;
827
828 if (msr) {
829 rdmsrl(msr, config);
830 config |= (1ULL << uncore_num_counters(box)) - 1;
831 /* WBox has a fixed counter */
832 if (uncore_msr_fixed_ctl(box))
833 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
834 wrmsrl(msr, config);
835 }
836}
837
838static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
839{
840 wrmsrl(event->hw.config_base, 0);
841}
842
843static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
844{
845 struct hw_perf_event *hwc = &event->hw;
846
847 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
848 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
849 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
850 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
851 else
852 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
853}
854
855#define NHMEX_UNCORE_OPS_COMMON_INIT() \
856 .init_box = nhmex_uncore_msr_init_box, \
857 .disable_box = nhmex_uncore_msr_disable_box, \
858 .enable_box = nhmex_uncore_msr_enable_box, \
859 .disable_event = nhmex_uncore_msr_disable_event, \
860 .read_counter = uncore_msr_read_counter
861
862static struct intel_uncore_ops nhmex_uncore_ops = {
863 NHMEX_UNCORE_OPS_COMMON_INIT(),
864 .enable_event = nhmex_uncore_msr_enable_event,
865};
866
867static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
868 &format_attr_event.attr,
869 &format_attr_edge.attr,
870 NULL,
871};
872
873static struct attribute_group nhmex_uncore_ubox_format_group = {
874 .name = "format",
875 .attrs = nhmex_uncore_ubox_formats_attr,
876};
877
878static struct intel_uncore_type nhmex_uncore_ubox = {
879 .name = "ubox",
880 .num_counters = 1,
881 .num_boxes = 1,
882 .perf_ctr_bits = 48,
883 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
884 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
885 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
886 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
887 .ops = &nhmex_uncore_ops,
888 .format_group = &nhmex_uncore_ubox_format_group
889};
890
891static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
892 &format_attr_event.attr,
893 &format_attr_umask.attr,
894 &format_attr_edge.attr,
895 &format_attr_inv.attr,
896 &format_attr_thresh8.attr,
897 NULL,
898};
899
900static struct attribute_group nhmex_uncore_cbox_format_group = {
901 .name = "format",
902 .attrs = nhmex_uncore_cbox_formats_attr,
903};
904
905static struct intel_uncore_type nhmex_uncore_cbox = {
906 .name = "cbox",
907 .num_counters = 6,
908 .num_boxes = 8,
909 .perf_ctr_bits = 48,
910 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
911 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
912 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
913 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
914 .msr_offset = NHMEX_C_MSR_OFFSET,
915 .pair_ctr_ctl = 1,
916 .ops = &nhmex_uncore_ops,
917 .format_group = &nhmex_uncore_cbox_format_group
918};
919
920static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
921 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
922 { /* end: all zeroes */ },
923};
924
925static struct intel_uncore_type nhmex_uncore_wbox = {
926 .name = "wbox",
927 .num_counters = 4,
928 .num_boxes = 1,
929 .perf_ctr_bits = 48,
930 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
931 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
932 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
933 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
934 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
935 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
936 .pair_ctr_ctl = 1,
937 .event_descs = nhmex_uncore_wbox_events,
938 .ops = &nhmex_uncore_ops,
939 .format_group = &nhmex_uncore_cbox_format_group
940};
941
942static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
943{
944 struct hw_perf_event *hwc = &event->hw;
945 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
946 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
947 int ctr, ev_sel;
948
949 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
950 NHMEX_B_PMON_CTR_SHIFT;
951 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
952 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
953
954 /* events that do not use the match/mask registers */
955 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
956 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
957 return 0;
958
959 if (box->pmu->pmu_idx == 0)
960 reg1->reg = NHMEX_B0_MSR_MATCH;
961 else
962 reg1->reg = NHMEX_B1_MSR_MATCH;
963 reg1->idx = 0;
964 reg1->config = event->attr.config1;
965 reg2->config = event->attr.config2;
966 return 0;
967}
968
969static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
970{
971 struct hw_perf_event *hwc = &event->hw;
972 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
973 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
974
975 if (reg1->idx != EXTRA_REG_NONE) {
976 wrmsrl(reg1->reg, reg1->config);
977 wrmsrl(reg1->reg + 1, reg2->config);
978 }
979 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
980 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
981}
982
983/*
984 * The Bbox has 4 counters, but each counter monitors different events.
985 * Use bits 6-7 in the event config to select the counter.
986 */
987static struct event_constraint nhmex_uncore_bbox_constraints[] = {
988 EVENT_CONSTRAINT(0 , 1, 0xc0),
989 EVENT_CONSTRAINT(0x40, 2, 0xc0),
990 EVENT_CONSTRAINT(0x80, 4, 0xc0),
991 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
992 EVENT_CONSTRAINT_END,
993};
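/*
 * Reading the table above (event encodings are illustrative): a config
 * with bits 6-7 == 10b, e.g. 0x84, matches the 0x80 entry and is pinned
 * to counter 2 (counter mask 0x4); bits 6-7 == 00b pin the event to
 * counter 0, and so on.
 */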
994
995static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
996 &format_attr_event5.attr,
997 &format_attr_counter.attr,
998 &format_attr_match.attr,
999 &format_attr_mask.attr,
1000 NULL,
1001};
1002
1003static struct attribute_group nhmex_uncore_bbox_format_group = {
1004 .name = "format",
1005 .attrs = nhmex_uncore_bbox_formats_attr,
1006};
1007
1008static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1009 NHMEX_UNCORE_OPS_COMMON_INIT(),
1010 .enable_event = nhmex_bbox_msr_enable_event,
1011 .hw_config = nhmex_bbox_hw_config,
1012 .get_constraint = uncore_get_constraint,
1013 .put_constraint = uncore_put_constraint,
1014};
1015
1016static struct intel_uncore_type nhmex_uncore_bbox = {
1017 .name = "bbox",
1018 .num_counters = 4,
1019 .num_boxes = 2,
1020 .perf_ctr_bits = 48,
1021 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1022 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1023 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1024 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1025 .msr_offset = NHMEX_B_MSR_OFFSET,
1026 .pair_ctr_ctl = 1,
1027 .num_shared_regs = 1,
1028 .constraints = nhmex_uncore_bbox_constraints,
1029 .ops = &nhmex_uncore_bbox_ops,
1030 .format_group = &nhmex_uncore_bbox_format_group
1031};
1032
1033static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1034{
1035 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1036 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1037
1038 if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) {
1039 reg1->config = event->attr.config1;
1040 reg2->config = event->attr.config2;
1041 } else {
1042 reg1->config = ~0ULL;
1043 reg2->config = ~0ULL;
1044 }
1045
1046 if (box->pmu->pmu_idx == 0)
1047 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1048 else
1049 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1050
1051 reg1->idx = 0;
1052
1053 return 0;
1054}
1055
1056static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1057{
1058 struct hw_perf_event *hwc = &event->hw;
1059 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1060 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1061
1062 wrmsrl(reg1->reg, 0);
1063 if (reg1->config != ~0ULL || reg2->config != ~0ULL) {
1064 wrmsrl(reg1->reg + 1, reg1->config);
1065 wrmsrl(reg1->reg + 2, reg2->config);
1066 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1067 }
1068 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1069}
1070
1071static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1072 &format_attr_event.attr,
1073 &format_attr_umask.attr,
1074 &format_attr_edge.attr,
1075 &format_attr_inv.attr,
1076 &format_attr_thresh8.attr,
1077 &format_attr_mm_cfg.attr,
1078 &format_attr_match.attr,
1079 &format_attr_mask.attr,
1080 NULL,
1081};
1082
1083static struct attribute_group nhmex_uncore_sbox_format_group = {
1084 .name = "format",
1085 .attrs = nhmex_uncore_sbox_formats_attr,
1086};
1087
1088static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1089 NHMEX_UNCORE_OPS_COMMON_INIT(),
1090 .enable_event = nhmex_sbox_msr_enable_event,
1091 .hw_config = nhmex_sbox_hw_config,
1092 .get_constraint = uncore_get_constraint,
1093 .put_constraint = uncore_put_constraint,
1094};
1095
1096static struct intel_uncore_type nhmex_uncore_sbox = {
1097 .name = "sbox",
1098 .num_counters = 4,
1099 .num_boxes = 2,
1100 .perf_ctr_bits = 48,
1101 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
1102 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
1103 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1104 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1105 .msr_offset = NHMEX_S_MSR_OFFSET,
1106 .pair_ctr_ctl = 1,
1107 .num_shared_regs = 1,
1108 .ops = &nhmex_uncore_sbox_ops,
1109 .format_group = &nhmex_uncore_sbox_format_group
1110};
1111
1112enum {
1113 EXTRA_REG_NHMEX_M_FILTER,
1114 EXTRA_REG_NHMEX_M_DSP,
1115 EXTRA_REG_NHMEX_M_ISS,
1116 EXTRA_REG_NHMEX_M_MAP,
1117 EXTRA_REG_NHMEX_M_MSC_THR,
1118 EXTRA_REG_NHMEX_M_PGT,
1119 EXTRA_REG_NHMEX_M_PLD,
1120 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
1121};
1122
1123static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1124 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
1125 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1126 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1127 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1128 /* event 0xa uses two extra registers */
1129 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1130 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
1131 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
1132 /* events 0xd ~ 0x10 use the same extra register */
1133 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1134 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1135 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1136 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1137 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1138 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
1139 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1140 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1141 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
1142 EVENT_EXTRA_END
1143};
1144
1145static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1146{
1147 struct intel_uncore_extra_reg *er;
1148 unsigned long flags;
1149 bool ret = false;
1150 u64 mask;
1151
1152 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1153 er = &box->shared_regs[idx];
1154 raw_spin_lock_irqsave(&er->lock, flags);
1155 if (!atomic_read(&er->ref) || er->config == config) {
1156 atomic_inc(&er->ref);
1157 er->config = config;
1158 ret = true;
1159 }
1160 raw_spin_unlock_irqrestore(&er->lock, flags);
1161
1162 return ret;
1163 }
1164 /*
1165 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1166 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
1167 * fields which are shared.
1168 */
1169 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1170 if (WARN_ON_ONCE(idx >= 4))
1171 return false;
1172
1173 /* mask of the shared fields */
1174 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1175 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1176
1177 raw_spin_lock_irqsave(&er->lock, flags);
1178 /* add mask of the non-shared field if it's in use */
1179 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8))
1180 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1181
1182 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1183 atomic_add(1 << (idx * 8), &er->ref);
1184 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
1185 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1186 er->config &= ~mask;
1187 er->config |= (config & mask);
1188 ret = true;
1189 }
1190 raw_spin_unlock_irqrestore(&er->lock, flags);
1191
1192 return ret;
1193}
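/*
 * The ZDP_CTL_FVC reference count above packs one 8-bit counter per FVC
 * field into a single atomic_t.  With a made-up value, er->ref ==
 * 0x00010200 means the field for event 0xe has two users and the field
 * for event 0xf has one, while the fields for 0xd and 0x10 are free.
 */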
1194
1195static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1196{
1197 struct intel_uncore_extra_reg *er;
1198
1199 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1200 er = &box->shared_regs[idx];
1201 atomic_dec(&er->ref);
1202 return;
1203 }
1204
1205 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1206 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1207 atomic_sub(1 << (idx * 8), &er->ref);
1208}
1209
1210u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1211{
1212 struct hw_perf_event *hwc = &event->hw;
1213 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1214 int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
1215 u64 config = reg1->config;
1216
1217 /* get the non-shared control bits and shift them */
1218 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1219 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1220 if (new_idx > orig_idx) {
1221 idx = new_idx - orig_idx;
1222 config <<= 3 * idx;
1223 } else {
1224 idx = orig_idx - new_idx;
1225 config >>= 3 * idx;
1226 }
1227
1228 /* add the shared control bits back */
1229 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1230 if (modify) {
1231 /* adjust the main event selector */
1232 if (new_idx > orig_idx)
1233 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1234 else
1235 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1236 reg1->config = config;
1237 reg1->idx = ~0xff | new_idx;
1238 }
1239 return config;
1240}
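/*
 * Example of the re-encoding done above (indices are illustrative):
 * moving an event from the 0xd FVC field to the 0xe FVC field means
 * new_idx - orig_idx == 1, so the event's private FVC bits are shifted
 * left by 3, the inc_sel field in hwc->config is bumped by 1, and the
 * shared FVC bits are carried over unchanged.
 */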
1241
1242static struct event_constraint *
1243nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1244{
1245 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1246 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1247 int i, idx[2], alloc = 0;
1248 u64 config1 = reg1->config;
1249
1250 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
1251 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
1252again:
1253 for (i = 0; i < 2; i++) {
1254 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
1255 idx[i] = 0xff;
1256
1257 if (idx[i] == 0xff)
1258 continue;
1259
1260 if (!nhmex_mbox_get_shared_reg(box, idx[i],
1261 __BITS_VALUE(config1, i, 32)))
1262 goto fail;
1263 alloc |= (0x1 << i);
1264 }
1265
1266 /* for the match/mask registers */
1267 if ((uncore_box_is_fake(box) || !reg2->alloc) &&
1268 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
1269 goto fail;
1270
1271 /*
1272 * If it's a fake box -- as per validate_{group,event}() we
1273 * shouldn't touch event state and we can avoid doing so
1274 * since both will only call get_event_constraints() once
1275 * on each event, this avoids the need for reg->alloc.
1276 */
1277 if (!uncore_box_is_fake(box)) {
1278 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
1279 nhmex_mbox_alter_er(event, idx[0], true);
1280 reg1->alloc |= alloc;
1281 reg2->alloc = 1;
1282 }
1283 return NULL;
1284fail:
1285 if (idx[0] != 0xff && !(alloc & 0x1) &&
1286 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1287 /*
1288 * events 0xd ~ 0x10 are functionally identical, but are
1289 * controlled by different fields in the ZDP_CTL_FVC
1290 * register. If we fail to take one field, try the
1291 * remaining 3 choices.
1292 */
1293 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
1294 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1295 idx[0] = (idx[0] + 1) % 4;
1296 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1297 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
1298 config1 = nhmex_mbox_alter_er(event, idx[0], false);
1299 goto again;
1300 }
1301 }
1302
1303 if (alloc & 0x1)
1304 nhmex_mbox_put_shared_reg(box, idx[0]);
1305 if (alloc & 0x2)
1306 nhmex_mbox_put_shared_reg(box, idx[1]);
1307 return &constraint_empty;
1308}
1309
1310static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1311{
1312 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1313 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1314
1315 if (uncore_box_is_fake(box))
1316 return;
1317
1318 if (reg1->alloc & 0x1)
1319 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
1320 if (reg1->alloc & 0x2)
1321 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
1322 reg1->alloc = 0;
1323
1324 if (reg2->alloc) {
1325 nhmex_mbox_put_shared_reg(box, reg2->idx);
1326 reg2->alloc = 0;
1327 }
1328}
1329
1330static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
1331{
1332 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1333 return er->idx;
1334 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
1335}
1336
1337static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1338{
1339 struct intel_uncore_type *type = box->pmu->type;
1340 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1341 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1342 struct extra_reg *er;
1343 unsigned msr;
1344 int reg_idx = 0;
1345
1346 if (WARN_ON_ONCE(reg1->idx != -1))
1347 return -EINVAL;
1348 /*
1349 * The mbox events may require at most 2 extra MSRs. But only
1350 * the lower 32 bits in these MSRs are significant, so we can use
1351 * config1 to pass two MSRs' config.
1352 */
1353 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
1354 if (er->event != (event->hw.config & er->config_mask))
1355 continue;
1356 if (event->attr.config1 & ~er->valid_mask)
1357 return -EINVAL;
1358 if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
1359 er->idx == __BITS_VALUE(reg1->idx, 1, 8))
1360 continue;
1361 if (WARN_ON_ONCE(reg_idx >= 2))
1362 return -EINVAL;
1363
1364 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
1365 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
1366 return -EINVAL;
1367
1368 /* always use bits 32~63 to pass the PLD config */
1369 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
1370 reg_idx = 1;
1371
1372 reg1->idx &= ~(0xff << (reg_idx * 8));
1373 reg1->reg &= ~(0xffff << (reg_idx * 16));
1374 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
1375 reg1->reg |= msr << (reg_idx * 16);
1376 reg1->config = event->attr.config1;
1377 reg_idx++;
1378 }
1379 /* use config2 to pass the filter config */
1380 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1381 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1382 reg2->config = event->attr.config2;
1383 else
1384 reg2->config = ~0ULL;
1385 if (box->pmu->pmu_idx == 0)
1386 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1387 else
1388 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1389
1390 return 0;
1391}
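/*
 * Packing produced by the hw_config above (a concrete sketch): for an
 * inc_sel 0xa event that needs both the ISS and the PLD extra registers,
 * byte 0 of reg1->idx holds the ISS index and byte 1 the PLD index, the
 * two MSR addresses occupy the low and high 16-bit halves of reg1->reg,
 * and the PLD config travels in bits 32~63 of attr.config1.
 */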
1392
1393static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1394{
1395 struct intel_uncore_extra_reg *er;
1396 unsigned long flags;
1397 u64 config;
1398
1399 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1400 return box->shared_regs[idx].config;
1401
1402 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1403 raw_spin_lock_irqsave(&er->lock, flags);
1404 config = er->config;
1405 raw_spin_unlock_irqrestore(&er->lock, flags);
1406 return config;
1407}
1408
1409static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1410{
1411 struct hw_perf_event *hwc = &event->hw;
1412 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1413 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1414 int idx;
1415
1416 idx = __BITS_VALUE(reg1->idx, 0, 8);
1417 if (idx != 0xff)
1418 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
1419 nhmex_mbox_shared_reg_config(box, idx));
1420 idx = __BITS_VALUE(reg1->idx, 1, 8);
1421 if (idx != 0xff)
1422 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1423 nhmex_mbox_shared_reg_config(box, idx));
1424
1425 wrmsrl(reg2->reg, 0);
1426 if (reg2->config != ~0ULL) {
1427 wrmsrl(reg2->reg + 1,
1428 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
1429 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1430 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
1431 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1432 }
1433
1434 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1435}
1436
1437DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
1438DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
1439DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
1440DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
1441DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
1442DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
1443DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63");
1444DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
1445DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
1446DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
1447DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
1448DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
1449DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
1450DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
1451DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
1452DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
1453
1454static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1455 &format_attr_count_mode.attr,
1456 &format_attr_storage_mode.attr,
1457 &format_attr_wrap_mode.attr,
1458 &format_attr_flag_mode.attr,
1459 &format_attr_inc_sel.attr,
1460 &format_attr_set_flag_sel.attr,
1461 &format_attr_filter_cfg.attr,
1462 &format_attr_filter_match.attr,
1463 &format_attr_filter_mask.attr,
1464 &format_attr_dsp.attr,
1465 &format_attr_thr.attr,
1466 &format_attr_fvc.attr,
1467 &format_attr_pgt.attr,
1468 &format_attr_map.attr,
1469 &format_attr_iss.attr,
1470 &format_attr_pld.attr,
1471 NULL,
1472};
1473
1474static struct attribute_group nhmex_uncore_mbox_format_group = {
1475 .name = "format",
1476 .attrs = nhmex_uncore_mbox_formats_attr,
1477};
1478
1479static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1480 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
1481 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
1482 { /* end: all zeroes */ },
1483};
1484
1485static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1486 NHMEX_UNCORE_OPS_COMMON_INIT(),
1487 .enable_event = nhmex_mbox_msr_enable_event,
1488 .hw_config = nhmex_mbox_hw_config,
1489 .get_constraint = nhmex_mbox_get_constraint,
1490 .put_constraint = nhmex_mbox_put_constraint,
1491};
1492
1493static struct intel_uncore_type nhmex_uncore_mbox = {
1494 .name = "mbox",
1495 .num_counters = 6,
1496 .num_boxes = 2,
1497 .perf_ctr_bits = 48,
1498 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
1499 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
1500 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
1501 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
1502 .msr_offset = NHMEX_M_MSR_OFFSET,
1503 .pair_ctr_ctl = 1,
1504 .num_shared_regs = 8,
1505 .event_descs = nhmex_uncore_mbox_events,
1506 .ops = &nhmex_uncore_mbox_ops,
1507 .format_group = &nhmex_uncore_mbox_format_group,
1508};
1509
1510void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1511{
1512 struct hw_perf_event *hwc = &event->hw;
1513 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1514 int port;
1515
1516 /* adjust the main event selector */
1517 if (reg1->idx % 2) {
1518 reg1->idx--;
1519 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1520 } else {
1521 reg1->idx++;
1522 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1523 }
1524
1525 /* adjust address or config of extra register */
1526 port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
1527 switch (reg1->idx % 6) {
1528 case 0:
1529 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
1530 break;
1531 case 1:
1532 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
1533 break;
1534 case 2:
1535 /* move bits 8~15 down to bits 0~7 */
1536 reg1->config >>= 8;
1537 break;
1538 case 3:
1539 /* move bits 0~7 up to bits 8~15 */
1540 reg1->config <<= 8;
1541 break;
1542 case 4:
1543 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
1544 break;
1545 case 5:
1546 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
1547 break;
1548 };
1549}
1550
1551/*
1552 * Each rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
1553 * An event set consists of 6 events; the 3rd and 4th events in
1554 * an event set use the same extra register, so an event set uses
1555 * 5 extra registers.
1556 */
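/*
 * Example of the mapping used below (indices are illustrative): for
 * reg1->idx == 9, the 4th event of the second set, idx = 9 % 6 = 3, which
 * shares an extra register with idx 2, so er_idx = 3 - 1 = 2; adding
 * (9 / 6) * 5 = 5 for the second set selects shared register 7.
 */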
1557static struct event_constraint *
1558nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 1559{
1560 struct hw_perf_event *hwc = &event->hw;
1561 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1562 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1563 struct intel_uncore_extra_reg *er;
1564 unsigned long flags;
1565 int idx, er_idx;
1566 u64 config1;
1567 bool ok = false;
1568
1569 if (!uncore_box_is_fake(box) && reg1->alloc)
1570 return NULL;
1571
1572 idx = reg1->idx % 6;
1573 config1 = reg1->config;
1574again:
1575 er_idx = idx;
1576 /* the 3rd and 4th events use the same extra register */
1577 if (er_idx > 2)
1578 er_idx--;
1579 er_idx += (reg1->idx / 6) * 5;
1580
1581 er = &box->shared_regs[er_idx];
1582 raw_spin_lock_irqsave(&er->lock, flags);
1583 if (idx < 2) {
1584 if (!atomic_read(&er->ref) || er->config == reg1->config) {
1585 atomic_inc(&er->ref);
1586 er->config = reg1->config;
1587 ok = true;
1588 }
1589 } else if (idx == 2 || idx == 3) {
1590 /*
1591 * these two events use different fields in an extra register,
1592 * bits 0~7 and bits 8~15 respectively.
1593 */
1594 u64 mask = 0xff << ((idx - 2) * 8);
1595 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
1596 !((er->config ^ config1) & mask)) {
1597 atomic_add(1 << ((idx - 2) * 8), &er->ref);
1598 er->config &= ~mask;
1599 er->config |= config1 & mask;
1600 ok = true;
1601 }
1602 } else {
1603 if (!atomic_read(&er->ref) ||
1604 (er->config == (hwc->config >> 32) &&
1605 er->config1 == reg1->config &&
1606 er->config2 == reg2->config)) {
1607 atomic_inc(&er->ref);
1608 er->config = (hwc->config >> 32);
1609 er->config1 = reg1->config;
1610 er->config2 = reg2->config;
1611 ok = true;
1612 }
1613 }
1614 raw_spin_unlock_irqrestore(&er->lock, flags);
1615
1616 if (!ok) {
1617 /*
1618 * The Rbox events are always in pairs. The paired
1619 * events are functionally identical, but use different
1620 * extra registers. If we fail to take an extra
1621 * register, try the alternative.
1622 */
1623 if (idx % 2)
1624 idx--;
1625 else
1626 idx++;
1627 if (idx != reg1->idx % 6) {
1628 if (idx == 2)
1629 config1 >>= 8;
1630 else if (idx == 3)
1631 config1 <<= 8;
1632 goto again;
1633 }
1634 } else {
1635 if (!uncore_box_is_fake(box)) {
1636 if (idx != reg1->idx % 6)
1637 nhmex_rbox_alter_er(box, event);
1638 reg1->alloc = 1;
1639 }
1640 return NULL;
1641 }
1642 return &constraint_empty;
1643}
1644
1645static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1646{
1647 struct intel_uncore_extra_reg *er;
1648 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1649 int idx, er_idx;
1650
1651 if (uncore_box_is_fake(box) || !reg1->alloc)
1652 return;
1653
1654 idx = reg1->idx % 6;
1655 er_idx = idx;
1656 if (er_idx > 2)
1657 er_idx--;
1658 er_idx += (reg1->idx / 6) * 5;
1659
1660 er = &box->shared_regs[er_idx];
1661 if (idx == 2 || idx == 3)
1662 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
1663 else
1664 atomic_dec(&er->ref);
1665
1666 reg1->alloc = 0;
1667}
1668
1669static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1670{
1671 struct hw_perf_event *hwc = &event->hw;
1672 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1673 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1674 int port, idx;
1675
1676 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1677 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1678 if (idx >= 0x18)
1679 return -EINVAL;
1680
1681 reg1->idx = idx;
1682 reg1->config = event->attr.config1;
1683
1684 port = idx / 6 + box->pmu->pmu_idx * 4;
1685 idx %= 6;
1686 switch (idx) {
1687 case 0:
1688 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
1689 break;
1690 case 1:
1691 reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
1692 break;
1693 case 2:
1694 case 3:
1695 reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
1696 break;
1697 case 4:
1698 case 5:
1699 if (idx == 4)
1700 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
1701 else
1702 reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
1703 reg2->config = event->attr.config2;
1704 hwc->config |= event->attr.config & (~0ULL << 32);
1705 break;
1706 };
1707 return 0;
1708}
1709
1710static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1711{
1712 struct intel_uncore_extra_reg *er;
1713 unsigned long flags;
1714 u64 config;
1715
1716 er = &box->shared_regs[idx];
1717
1718 raw_spin_lock_irqsave(&er->lock, flags);
1719 config = er->config;
1720 raw_spin_unlock_irqrestore(&er->lock, flags);
1721
1722 return config;
1723}
1724
1725static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1726{
1727 struct hw_perf_event *hwc = &event->hw;
1728 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1729 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1730 int idx, er_idx;
1731
1732 idx = reg1->idx % 6;
1733 er_idx = idx;
1734 if (er_idx > 2)
1735 er_idx--;
1736 er_idx += (reg1->idx / 6) * 5;
1737
1738 switch (idx) {
1739 case 0:
1740 case 1:
1741 wrmsrl(reg1->reg, reg1->config);
1742 break;
1743 case 2:
1744 case 3:
1745 wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx));
1746 break;
1747 case 4:
1748 case 5:
1749 wrmsrl(reg1->reg, reg1->config);
1750 wrmsrl(reg1->reg + 1, hwc->config >> 32);
1751 wrmsrl(reg1->reg + 2, reg2->config);
1752 break;
1753 }
1754
1755 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1756 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1757}
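/*
 * For indices 4 and 5 the enable path above programs three consecutive
 * MSRs: the XBR MM_CFG register at reg1->reg, the match value at
 * reg1->reg + 1 (the upper 32 bits of hwc->config) and the mask value
 * at reg1->reg + 2 (reg2->config).  These map onto the xbr_mm_cfg,
 * xbr_match and xbr_mask format attributes defined below.
 */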
1758
1759DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63");
1760DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63");
1761DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1762DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1763DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
1764
1765static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1766 &format_attr_event5.attr,
1767 &format_attr_xbr_mm_cfg.attr,
1768 &format_attr_xbr_match.attr,
1769 &format_attr_xbr_mask.attr,
1770 &format_attr_qlx_cfg.attr,
1771 &format_attr_iperf_cfg.attr,
fcde10e9
YZ
1772 NULL,
1773};
1774
254298c7 1775static struct attribute_group nhmex_uncore_rbox_format_group = {
fcde10e9 1776 .name = "format",
254298c7 1777 .attrs = nhmex_uncore_rbox_formats_attr,
fcde10e9
YZ
1778};
1779
254298c7
YZ
1780static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
1781 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
1782 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
1783 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
1784 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
1785 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
1786 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
fcde10e9
YZ
1787 { /* end: all zeroes */ },
1788};
1789
254298c7
YZ
1790static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
1791 NHMEX_UNCORE_OPS_COMMON_INIT(),
1792 .enable_event = nhmex_rbox_msr_enable_event,
1793 .hw_config = nhmex_rbox_hw_config,
1794 .get_constraint = nhmex_rbox_get_constraint,
1795 .put_constraint = nhmex_rbox_put_constraint,
fcde10e9
YZ
1796};
1797
254298c7
YZ
1798static struct intel_uncore_type nhmex_uncore_rbox = {
1799 .name = "rbox",
1800 .num_counters = 8,
1801 .num_boxes = 2,
1802 .perf_ctr_bits = 48,
1803 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
1804 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
1805 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
1806 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
1807 .msr_offset = NHMEX_R_MSR_OFFSET,
1808 .pair_ctr_ctl = 1,
1809 .num_shared_regs = 20,
1810 .event_descs = nhmex_uncore_rbox_events,
1811 .ops = &nhmex_uncore_rbox_ops,
1812 .format_group = &nhmex_uncore_rbox_format_group
fcde10e9
YZ
1813};
1814
254298c7
YZ
1815static struct intel_uncore_type *nhmex_msr_uncores[] = {
1816 &nhmex_uncore_ubox,
1817 &nhmex_uncore_cbox,
1818 &nhmex_uncore_bbox,
1819 &nhmex_uncore_sbox,
1820 &nhmex_uncore_mbox,
1821 &nhmex_uncore_rbox,
1822 &nhmex_uncore_wbox,
fcde10e9
YZ
1823 NULL,
1824};
254298c7 1825/* end of Nehalem-EX uncore support */
fcde10e9 1826
254298c7 1827static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
087bfbb0
YZ
1828{
1829 struct hw_perf_event *hwc = &event->hw;
1830
1831 hwc->idx = idx;
1832 hwc->last_tag = ++box->tags[idx];
1833
1834 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
14371cce
YZ
1835 hwc->event_base = uncore_fixed_ctr(box);
1836 hwc->config_base = uncore_fixed_ctl(box);
087bfbb0
YZ
1837 return;
1838 }
1839
14371cce
YZ
1840 hwc->config_base = uncore_event_ctl(box, hwc->idx);
1841 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
087bfbb0
YZ
1842}
1843
254298c7 1844static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0
YZ
1845{
1846 u64 prev_count, new_count, delta;
1847 int shift;
1848
1849 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
1850 shift = 64 - uncore_fixed_ctr_bits(box);
1851 else
1852 shift = 64 - uncore_perf_ctr_bits(box);
1853
1854 /* the hrtimer might modify the previous event value */
1855again:
1856 prev_count = local64_read(&event->hw.prev_count);
1857 new_count = uncore_read_counter(box, event);
1858 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
1859 goto again;
1860
1861 delta = (new_count << shift) - (prev_count << shift);
1862 delta >>= shift;
1863
1864 local64_add(delta, &event->count);
1865}
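/*
 * The shift dance above keeps only the low counter-width bits of the
 * difference, so a wrap of the (narrower than 64-bit) hardware counter
 * is still accounted correctly.  For a 48-bit counter shift is 16, and
 * ((new << 16) - (prev << 16)) >> 16 yields the delta modulo 2^48,
 * e.g. prev = 0xffffffffffff and new = 0x1 give a delta of 2.
 */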
1866
1867/*
1868 * The overflow interrupt is unavailable for SandyBridge-EP and is
1869 * broken for SandyBridge, so we use an hrtimer to poll the counters
1870 * periodically and avoid overflow.
1871 */
1872static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
1873{
1874 struct intel_uncore_box *box;
1875 unsigned long flags;
1876 int bit;
1877
1878 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
1879 if (!box->n_active || box->cpu != smp_processor_id())
1880 return HRTIMER_NORESTART;
1881 /*
1882 * disable local interrupts to prevent uncore_pmu_event_start/stop
1883 * from interrupting the update process
1884 */
1885 local_irq_save(flags);
1886
1887 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
1888 uncore_perf_event_update(box, box->events[bit]);
1889
1890 local_irq_restore(flags);
1891
1892 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
1893 return HRTIMER_RESTART;
1894}
1895
1896static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
1897{
1898 __hrtimer_start_range_ns(&box->hrtimer,
1899 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
1900 HRTIMER_MODE_REL_PINNED, 0);
1901}
1902
1903static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
1904{
1905 hrtimer_cancel(&box->hrtimer);
1906}
1907
1908static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
1909{
1910 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1911 box->hrtimer.function = uncore_pmu_hrtimer;
1912}
1913
254298c7 1914struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
087bfbb0
YZ
1915{
1916 struct intel_uncore_box *box;
6a67943a 1917 int i, size;
087bfbb0 1918
254298c7 1919 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
6a67943a
YZ
1920
1921 box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
087bfbb0
YZ
1922 if (!box)
1923 return NULL;
1924
6a67943a
YZ
1925 for (i = 0; i < type->num_shared_regs; i++)
1926 raw_spin_lock_init(&box->shared_regs[i].lock);
1927
087bfbb0
YZ
1928 uncore_pmu_init_hrtimer(box);
1929 atomic_set(&box->refcnt, 1);
1930 box->cpu = -1;
1931 box->phys_id = -1;
1932
1933 return box;
1934}
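/*
 * The box and its shared registers are carved out of a single
 * allocation: "size" covers the struct itself plus num_shared_regs
 * trailing intel_uncore_extra_reg entries (shared_regs presumably
 * being a trailing array member of struct intel_uncore_box).
 */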
1935
1936static struct intel_uncore_box *
1937uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
1938{
14371cce
YZ
1939 struct intel_uncore_box *box;
1940
1941 box = *per_cpu_ptr(pmu->box, cpu);
1942 if (box)
1943 return box;
1944
1945 raw_spin_lock(&uncore_box_lock);
1946 list_for_each_entry(box, &pmu->box_list, list) {
1947 if (box->phys_id == topology_physical_package_id(cpu)) {
1948 atomic_inc(&box->refcnt);
1949 *per_cpu_ptr(pmu->box, cpu) = box;
1950 break;
1951 }
1952 }
1953 raw_spin_unlock(&uncore_box_lock);
1954
087bfbb0
YZ
1955 return *per_cpu_ptr(pmu->box, cpu);
1956}
1957
1958static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
1959{
1960 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
1961}
1962
1963static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
1964{
1965 /*
1966 * perf core schedules events on a per-cpu basis; uncore events are
1967 * collected by one of the cpus inside a physical package.
1968 */
254298c7 1969 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
087bfbb0
YZ
1970}
1971
254298c7
YZ
1972static int
1973uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
087bfbb0
YZ
1974{
1975 struct perf_event *event;
1976 int n, max_count;
1977
1978 max_count = box->pmu->type->num_counters;
1979 if (box->pmu->type->fixed_ctl)
1980 max_count++;
1981
1982 if (box->n_events >= max_count)
1983 return -EINVAL;
1984
1985 n = box->n_events;
1986 box->event_list[n] = leader;
1987 n++;
1988 if (!dogrp)
1989 return n;
1990
1991 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1992 if (event->state <= PERF_EVENT_STATE_OFF)
1993 continue;
1994
1995 if (n >= max_count)
1996 return -EINVAL;
1997
1998 box->event_list[n] = event;
1999 n++;
2000 }
2001 return n;
2002}
2003
2004static struct event_constraint *
254298c7 2005uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 2006{
6a67943a 2007 struct intel_uncore_type *type = box->pmu->type;
087bfbb0
YZ
2008 struct event_constraint *c;
2009
6a67943a
YZ
2010 if (type->ops->get_constraint) {
2011 c = type->ops->get_constraint(box, event);
2012 if (c)
2013 return c;
2014 }
2015
087bfbb0
YZ
2016 if (event->hw.config == ~0ULL)
2017 return &constraint_fixed;
2018
2019 if (type->constraints) {
2020 for_each_event_constraint(c, type->constraints) {
2021 if ((event->hw.config & c->cmask) == c->code)
2022 return c;
2023 }
2024 }
2025
2026 return &type->unconstrainted;
2027}
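/*
 * Constraint lookup order used above: a type-specific get_constraint()
 * hook wins if it returns one; the magic ~0ULL config (set up for
 * UNCORE_FIXED_EVENT in uncore_pmu_event_init()) selects the fixed
 * counter constraint; then the static constraint table is scanned, and
 * anything else falls back to the type's "unconstrainted" mask.
 */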
2028
254298c7 2029static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
6a67943a
YZ
2030{
2031 if (box->pmu->type->ops->put_constraint)
2032 box->pmu->type->ops->put_constraint(box, event);
2033}
2034
254298c7 2035static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
087bfbb0
YZ
2036{
2037 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2038 struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
6a67943a 2039 int i, wmin, wmax, ret = 0;
087bfbb0
YZ
2040 struct hw_perf_event *hwc;
2041
2042 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2043
2044 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
6a67943a 2045 c = uncore_get_event_constraint(box, box->event_list[i]);
087bfbb0
YZ
2046 constraints[i] = c;
2047 wmin = min(wmin, c->weight);
2048 wmax = max(wmax, c->weight);
2049 }
2050
2051 /* fastpath, try to reuse previous register */
2052 for (i = 0; i < n; i++) {
2053 hwc = &box->event_list[i]->hw;
2054 c = constraints[i];
2055
2056 /* never assigned */
2057 if (hwc->idx == -1)
2058 break;
2059
2060 /* constraint still honored */
2061 if (!test_bit(hwc->idx, c->idxmsk))
2062 break;
2063
2064 /* not already used */
2065 if (test_bit(hwc->idx, used_mask))
2066 break;
2067
2068 __set_bit(hwc->idx, used_mask);
6a67943a
YZ
2069 if (assign)
2070 assign[i] = hwc->idx;
087bfbb0 2071 }
087bfbb0 2072 /* slow path */
6a67943a
YZ
2073 if (i != n)
2074 ret = perf_assign_events(constraints, n, wmin, wmax, assign);
2075
2076 if (!assign || ret) {
2077 for (i = 0; i < n; i++)
2078 uncore_put_event_constraint(box, box->event_list[i]);
2079 }
087bfbb0
YZ
2080 return ret ? -EINVAL : 0;
2081}
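/*
 * As in the core x86 event scheduler, the fast path above only reuses
 * a counter when the previous assignment still satisfies the event's
 * constraint; otherwise perf_assign_events() redoes the assignment from
 * scratch.  When called with assign == NULL (group validation) or on
 * failure, any extra-register state taken via get_constraint() is
 * released again through uncore_put_event_constraint().
 */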
2082
2083static void uncore_pmu_event_start(struct perf_event *event, int flags)
2084{
2085 struct intel_uncore_box *box = uncore_event_to_box(event);
2086 int idx = event->hw.idx;
2087
2088 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2089 return;
2090
2091 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2092 return;
2093
2094 event->hw.state = 0;
2095 box->events[idx] = event;
2096 box->n_active++;
2097 __set_bit(idx, box->active_mask);
2098
2099 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2100 uncore_enable_event(box, event);
2101
2102 if (box->n_active == 1) {
2103 uncore_enable_box(box);
2104 uncore_pmu_start_hrtimer(box);
2105 }
2106}
2107
2108static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2109{
2110 struct intel_uncore_box *box = uncore_event_to_box(event);
2111 struct hw_perf_event *hwc = &event->hw;
2112
2113 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2114 uncore_disable_event(box, event);
2115 box->n_active--;
2116 box->events[hwc->idx] = NULL;
2117 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2118 hwc->state |= PERF_HES_STOPPED;
2119
2120 if (box->n_active == 0) {
2121 uncore_disable_box(box);
2122 uncore_pmu_cancel_hrtimer(box);
2123 }
2124 }
2125
2126 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2127 /*
2128 * Drain the remaining delta count out of an event
2129 * that we are disabling:
2130 */
2131 uncore_perf_event_update(box, event);
2132 hwc->state |= PERF_HES_UPTODATE;
2133 }
2134}
2135
2136static int uncore_pmu_event_add(struct perf_event *event, int flags)
2137{
2138 struct intel_uncore_box *box = uncore_event_to_box(event);
2139 struct hw_perf_event *hwc = &event->hw;
2140 int assign[UNCORE_PMC_IDX_MAX];
2141 int i, n, ret;
2142
2143 if (!box)
2144 return -ENODEV;
2145
2146 ret = n = uncore_collect_events(box, event, false);
2147 if (ret < 0)
2148 return ret;
2149
2150 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2151 if (!(flags & PERF_EF_START))
2152 hwc->state |= PERF_HES_ARCH;
2153
2154 ret = uncore_assign_events(box, assign, n);
2155 if (ret)
2156 return ret;
2157
2158 /* save events moving to new counters */
2159 for (i = 0; i < box->n_events; i++) {
2160 event = box->event_list[i];
2161 hwc = &event->hw;
2162
2163 if (hwc->idx == assign[i] &&
2164 hwc->last_tag == box->tags[assign[i]])
2165 continue;
2166 /*
2167 * Ensure we don't accidentally enable a stopped
2168 * counter simply because we rescheduled.
2169 */
2170 if (hwc->state & PERF_HES_STOPPED)
2171 hwc->state |= PERF_HES_ARCH;
2172
2173 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2174 }
2175
2176 /* reprogram moved events into new counters */
2177 for (i = 0; i < n; i++) {
2178 event = box->event_list[i];
2179 hwc = &event->hw;
2180
2181 if (hwc->idx != assign[i] ||
2182 hwc->last_tag != box->tags[assign[i]])
2183 uncore_assign_hw_event(box, event, assign[i]);
2184 else if (i < box->n_events)
2185 continue;
2186
2187 if (hwc->state & PERF_HES_ARCH)
2188 continue;
2189
2190 uncore_pmu_event_start(event, 0);
2191 }
2192 box->n_events = n;
2193
2194 return 0;
2195}
2196
2197static void uncore_pmu_event_del(struct perf_event *event, int flags)
2198{
2199 struct intel_uncore_box *box = uncore_event_to_box(event);
2200 int i;
2201
2202 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2203
2204 for (i = 0; i < box->n_events; i++) {
2205 if (event == box->event_list[i]) {
6a67943a
YZ
2206 uncore_put_event_constraint(box, event);
2207
087bfbb0
YZ
2208 while (++i < box->n_events)
2209 box->event_list[i - 1] = box->event_list[i];
2210
2211 --box->n_events;
2212 break;
2213 }
2214 }
2215
2216 event->hw.idx = -1;
2217 event->hw.last_tag = ~0ULL;
2218}
2219
2220static void uncore_pmu_event_read(struct perf_event *event)
2221{
2222 struct intel_uncore_box *box = uncore_event_to_box(event);
2223 uncore_perf_event_update(box, event);
2224}
2225
2226/*
2227 * validation ensures the group can be loaded onto the
2228 * PMU if it was the only group available.
2229 */
2230static int uncore_validate_group(struct intel_uncore_pmu *pmu,
2231 struct perf_event *event)
2232{
2233 struct perf_event *leader = event->group_leader;
2234 struct intel_uncore_box *fake_box;
087bfbb0
YZ
2235 int ret = -EINVAL, n;
2236
6a67943a 2237 fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
087bfbb0
YZ
2238 if (!fake_box)
2239 return -ENOMEM;
2240
2241 fake_box->pmu = pmu;
2242 /*
2243 * the event is not yet connected with its
2244 * siblings, therefore we must first collect the
2245 * existing siblings, then add the new event
2246 * before we can simulate the scheduling.
2247 */
2248 n = uncore_collect_events(fake_box, leader, true);
2249 if (n < 0)
2250 goto out;
2251
2252 fake_box->n_events = n;
2253 n = uncore_collect_events(fake_box, event, false);
2254 if (n < 0)
2255 goto out;
2256
2257 fake_box->n_events = n;
2258
6a67943a 2259 ret = uncore_assign_events(fake_box, NULL, n);
087bfbb0
YZ
2260out:
2261 kfree(fake_box);
2262 return ret;
2263}
2264
2265int uncore_pmu_event_init(struct perf_event *event)
2266{
2267 struct intel_uncore_pmu *pmu;
2268 struct intel_uncore_box *box;
2269 struct hw_perf_event *hwc = &event->hw;
2270 int ret;
2271
2272 if (event->attr.type != event->pmu->type)
2273 return -ENOENT;
2274
2275 pmu = uncore_event_to_pmu(event);
2276 /* no device found for this pmu */
2277 if (pmu->func_id < 0)
2278 return -ENOENT;
2279
2280 /*
2281 * The uncore PMU always measures at all privilege levels, so it
2282 * doesn't make sense to specify any exclude bits.
2283 */
2284 if (event->attr.exclude_user || event->attr.exclude_kernel ||
2285 event->attr.exclude_hv || event->attr.exclude_idle)
2286 return -EINVAL;
2287
2288 /* Sampling not supported yet */
2289 if (hwc->sample_period)
2290 return -EINVAL;
2291
2292 /*
2293 * Place all uncore events for a particular physical package
2294 * onto a single cpu
2295 */
2296 if (event->cpu < 0)
2297 return -EINVAL;
2298 box = uncore_pmu_to_box(pmu, event->cpu);
2299 if (!box || box->cpu < 0)
2300 return -EINVAL;
2301 event->cpu = box->cpu;
2302
6a67943a
YZ
2303 event->hw.idx = -1;
2304 event->hw.last_tag = ~0ULL;
2305 event->hw.extra_reg.idx = EXTRA_REG_NONE;
2306
087bfbb0
YZ
2307 if (event->attr.config == UNCORE_FIXED_EVENT) {
2308 /* no fixed counter */
2309 if (!pmu->type->fixed_ctl)
2310 return -EINVAL;
2311 /*
2312 * if there is only one fixed counter, only the first pmu
2313 * can access the fixed counter
2314 */
2315 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
2316 return -EINVAL;
2317 hwc->config = ~0ULL;
2318 } else {
2319 hwc->config = event->attr.config & pmu->type->event_mask;
6a67943a
YZ
2320 if (pmu->type->ops->hw_config) {
2321 ret = pmu->type->ops->hw_config(box, event);
2322 if (ret)
2323 return ret;
2324 }
087bfbb0
YZ
2325 }
2326
087bfbb0
YZ
2327 if (event->group_leader != event)
2328 ret = uncore_validate_group(pmu, event);
2329 else
2330 ret = 0;
2331
2332 return ret;
2333}
2334
2335static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
2336{
2337 int ret;
2338
2339 pmu->pmu = (struct pmu) {
2340 .attr_groups = pmu->type->attr_groups,
2341 .task_ctx_nr = perf_invalid_context,
2342 .event_init = uncore_pmu_event_init,
2343 .add = uncore_pmu_event_add,
2344 .del = uncore_pmu_event_del,
2345 .start = uncore_pmu_event_start,
2346 .stop = uncore_pmu_event_stop,
2347 .read = uncore_pmu_event_read,
2348 };
2349
2350 if (pmu->type->num_boxes == 1) {
2351 if (strlen(pmu->type->name) > 0)
2352 sprintf(pmu->name, "uncore_%s", pmu->type->name);
2353 else
2354 sprintf(pmu->name, "uncore");
2355 } else {
2356 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
2357 pmu->pmu_idx);
2358 }
2359
2360 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
2361 return ret;
2362}
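/*
 * The registered PMUs show up under the names built above, i.e.
 * "uncore" or "uncore_<type>" for single-box types and
 * "uncore_<type>_<idx>" otherwise (e.g. "uncore_cbox_0").  From
 * userspace they can then be addressed with the usual dynamic-PMU
 * event syntax, for example something along the lines of:
 *
 *	perf stat -a -e 'uncore_cbox_0/event=0x1/'
 */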
2363
2364static void __init uncore_type_exit(struct intel_uncore_type *type)
2365{
2366 int i;
2367
2368 for (i = 0; i < type->num_boxes; i++)
2369 free_percpu(type->pmus[i].box);
2370 kfree(type->pmus);
2371 type->pmus = NULL;
2372 kfree(type->attr_groups[1]);
2373 type->attr_groups[1] = NULL;
2374}
2375
cffa59ba 2376static void __init uncore_types_exit(struct intel_uncore_type **types)
14371cce
YZ
2377{
2378 int i;
2379 for (i = 0; types[i]; i++)
2380 uncore_type_exit(types[i]);
2381}
2382
087bfbb0
YZ
2383static int __init uncore_type_init(struct intel_uncore_type *type)
2384{
2385 struct intel_uncore_pmu *pmus;
2386 struct attribute_group *events_group;
2387 struct attribute **attrs;
2388 int i, j;
2389
2390 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
2391 if (!pmus)
2392 return -ENOMEM;
2393
2394 type->unconstrainted = (struct event_constraint)
2395 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2396 0, type->num_counters, 0);
2397
2398 for (i = 0; i < type->num_boxes; i++) {
2399 pmus[i].func_id = -1;
2400 pmus[i].pmu_idx = i;
2401 pmus[i].type = type;
14371cce 2402 INIT_LIST_HEAD(&pmus[i].box_list);
087bfbb0
YZ
2403 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
2404 if (!pmus[i].box)
2405 goto fail;
2406 }
2407
2408 if (type->event_descs) {
2409 i = 0;
2410 while (type->event_descs[i].attr.attr.name)
2411 i++;
2412
2413 events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
2414 sizeof(*events_group), GFP_KERNEL);
2415 if (!events_group)
2416 goto fail;
2417
2418 attrs = (struct attribute **)(events_group + 1);
2419 events_group->name = "events";
2420 events_group->attrs = attrs;
2421
2422 for (j = 0; j < i; j++)
2423 attrs[j] = &type->event_descs[j].attr.attr;
2424
2425 type->attr_groups[1] = events_group;
2426 }
2427
2428 type->pmus = pmus;
2429 return 0;
2430fail:
2431 uncore_type_exit(type);
2432 return -ENOMEM;
2433}
2434
2435static int __init uncore_types_init(struct intel_uncore_type **types)
2436{
2437 int i, ret;
2438
2439 for (i = 0; types[i]; i++) {
2440 ret = uncore_type_init(types[i]);
2441 if (ret)
2442 goto fail;
2443 }
2444 return 0;
2445fail:
2446 while (--i >= 0)
2447 uncore_type_exit(types[i]);
2448 return ret;
2449}
2450
14371cce
YZ
2451static struct pci_driver *uncore_pci_driver;
2452static bool pcidrv_registered;
2453
2454/*
2455 * add a pci uncore device
2456 */
254298c7 2457static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
14371cce
YZ
2458{
2459 struct intel_uncore_pmu *pmu;
2460 struct intel_uncore_box *box;
2461 int i, phys_id;
2462
2463 phys_id = pcibus_to_physid[pdev->bus->number];
2464 if (phys_id < 0)
2465 return -ENODEV;
2466
6a67943a 2467 box = uncore_alloc_box(type, 0);
14371cce
YZ
2468 if (!box)
2469 return -ENOMEM;
2470
2471 /*
2472 * for a performance monitoring unit with multiple boxes,
2473 * each box has a different function id.
2474 */
2475 for (i = 0; i < type->num_boxes; i++) {
2476 pmu = &type->pmus[i];
2477 if (pmu->func_id == pdev->devfn)
2478 break;
2479 if (pmu->func_id < 0) {
2480 pmu->func_id = pdev->devfn;
2481 break;
2482 }
2483 pmu = NULL;
2484 }
2485
2486 if (!pmu) {
2487 kfree(box);
2488 return -EINVAL;
2489 }
2490
2491 box->phys_id = phys_id;
2492 box->pci_dev = pdev;
2493 box->pmu = pmu;
2494 uncore_box_init(box);
2495 pci_set_drvdata(pdev, box);
2496
2497 raw_spin_lock(&uncore_box_lock);
2498 list_add_tail(&box->list, &pmu->box_list);
2499 raw_spin_unlock(&uncore_box_lock);
2500
2501 return 0;
2502}
2503
357398e9 2504static void uncore_pci_remove(struct pci_dev *pdev)
14371cce
YZ
2505{
2506 struct intel_uncore_box *box = pci_get_drvdata(pdev);
2507 struct intel_uncore_pmu *pmu = box->pmu;
2508 int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
2509
2510 if (WARN_ON_ONCE(phys_id != box->phys_id))
2511 return;
2512
2513 raw_spin_lock(&uncore_box_lock);
2514 list_del(&box->list);
2515 raw_spin_unlock(&uncore_box_lock);
2516
2517 for_each_possible_cpu(cpu) {
2518 if (*per_cpu_ptr(pmu->box, cpu) == box) {
2519 *per_cpu_ptr(pmu->box, cpu) = NULL;
2520 atomic_dec(&box->refcnt);
2521 }
2522 }
2523
2524 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
2525 kfree(box);
2526}
2527
2528static int __devinit uncore_pci_probe(struct pci_dev *pdev,
2529 const struct pci_device_id *id)
2530{
2531 struct intel_uncore_type *type;
2532
2533 type = (struct intel_uncore_type *)id->driver_data;
254298c7 2534
14371cce
YZ
2535 return uncore_pci_add(type, pdev);
2536}
2537
2538static int __init uncore_pci_init(void)
2539{
2540 int ret;
2541
2542 switch (boot_cpu_data.x86_model) {
7c94ee2e
YZ
2543 case 45: /* Sandy Bridge-EP */
2544 pci_uncores = snbep_pci_uncores;
2545 uncore_pci_driver = &snbep_uncore_pci_driver;
2546 snbep_pci2phy_map_init();
2547 break;
14371cce
YZ
2548 default:
2549 return 0;
2550 }
2551
2552 ret = uncore_types_init(pci_uncores);
2553 if (ret)
2554 return ret;
2555
2556 uncore_pci_driver->probe = uncore_pci_probe;
2557 uncore_pci_driver->remove = uncore_pci_remove;
2558
2559 ret = pci_register_driver(uncore_pci_driver);
2560 if (ret == 0)
2561 pcidrv_registered = true;
2562 else
2563 uncore_types_exit(pci_uncores);
2564
2565 return ret;
2566}
2567
2568static void __init uncore_pci_exit(void)
2569{
2570 if (pcidrv_registered) {
2571 pcidrv_registered = false;
2572 pci_unregister_driver(uncore_pci_driver);
2573 uncore_types_exit(pci_uncores);
2574 }
2575}
2576
087bfbb0
YZ
2577static void __cpuinit uncore_cpu_dying(int cpu)
2578{
2579 struct intel_uncore_type *type;
2580 struct intel_uncore_pmu *pmu;
2581 struct intel_uncore_box *box;
2582 int i, j;
2583
2584 for (i = 0; msr_uncores[i]; i++) {
2585 type = msr_uncores[i];
2586 for (j = 0; j < type->num_boxes; j++) {
2587 pmu = &type->pmus[j];
2588 box = *per_cpu_ptr(pmu->box, cpu);
2589 *per_cpu_ptr(pmu->box, cpu) = NULL;
2590 if (box && atomic_dec_and_test(&box->refcnt))
2591 kfree(box);
2592 }
2593 }
2594}
2595
2596static int __cpuinit uncore_cpu_starting(int cpu)
2597{
2598 struct intel_uncore_type *type;
2599 struct intel_uncore_pmu *pmu;
2600 struct intel_uncore_box *box, *exist;
2601 int i, j, k, phys_id;
2602
2603 phys_id = topology_physical_package_id(cpu);
2604
2605 for (i = 0; msr_uncores[i]; i++) {
2606 type = msr_uncores[i];
2607 for (j = 0; j < type->num_boxes; j++) {
2608 pmu = &type->pmus[j];
2609 box = *per_cpu_ptr(pmu->box, cpu);
2610 /* called by uncore_cpu_init? */
2611 if (box && box->phys_id >= 0) {
2612 uncore_box_init(box);
2613 continue;
2614 }
2615
2616 for_each_online_cpu(k) {
2617 exist = *per_cpu_ptr(pmu->box, k);
2618 if (exist && exist->phys_id == phys_id) {
2619 atomic_inc(&exist->refcnt);
2620 *per_cpu_ptr(pmu->box, cpu) = exist;
2621 kfree(box);
2622 box = NULL;
2623 break;
2624 }
2625 }
2626
2627 if (box) {
2628 box->phys_id = phys_id;
2629 uncore_box_init(box);
2630 }
2631 }
2632 }
2633 return 0;
2634}
2635
2636static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
2637{
2638 struct intel_uncore_type *type;
2639 struct intel_uncore_pmu *pmu;
2640 struct intel_uncore_box *box;
2641 int i, j;
2642
2643 for (i = 0; msr_uncores[i]; i++) {
2644 type = msr_uncores[i];
2645 for (j = 0; j < type->num_boxes; j++) {
2646 pmu = &type->pmus[j];
2647 if (pmu->func_id < 0)
2648 pmu->func_id = j;
2649
6a67943a 2650 box = uncore_alloc_box(type, cpu);
087bfbb0
YZ
2651 if (!box)
2652 return -ENOMEM;
2653
2654 box->pmu = pmu;
2655 box->phys_id = phys_id;
2656 *per_cpu_ptr(pmu->box, cpu) = box;
2657 }
2658 }
2659 return 0;
2660}
2661
254298c7
YZ
2662static void __cpuinit
2663uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
087bfbb0
YZ
2664{
2665 struct intel_uncore_type *type;
2666 struct intel_uncore_pmu *pmu;
2667 struct intel_uncore_box *box;
2668 int i, j;
2669
2670 for (i = 0; uncores[i]; i++) {
2671 type = uncores[i];
2672 for (j = 0; j < type->num_boxes; j++) {
2673 pmu = &type->pmus[j];
2674 if (old_cpu < 0)
2675 box = uncore_pmu_to_box(pmu, new_cpu);
2676 else
2677 box = uncore_pmu_to_box(pmu, old_cpu);
2678 if (!box)
2679 continue;
2680
2681 if (old_cpu < 0) {
2682 WARN_ON_ONCE(box->cpu != -1);
2683 box->cpu = new_cpu;
2684 continue;
2685 }
2686
2687 WARN_ON_ONCE(box->cpu != old_cpu);
2688 if (new_cpu >= 0) {
2689 uncore_pmu_cancel_hrtimer(box);
2690 perf_pmu_migrate_context(&pmu->pmu,
2691 old_cpu, new_cpu);
2692 box->cpu = new_cpu;
2693 } else {
2694 box->cpu = -1;
2695 }
2696 }
2697 }
2698}
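/*
 * uncore_change_context() covers three cases: old_cpu < 0 designates a
 * brand-new collector cpu for the package (box->cpu goes from -1 to
 * new_cpu), new_cpu < 0 orphans the boxes when the last cpu of the
 * package goes away, and otherwise the active events are handed over
 * to the new collector via perf_pmu_migrate_context().
 */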
2699
2700static void __cpuinit uncore_event_exit_cpu(int cpu)
2701{
2702 int i, phys_id, target;
2703
2704 /* if exiting cpu is used for collecting uncore events */
2705 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2706 return;
2707
2708 /* find a new cpu to collect uncore events */
2709 phys_id = topology_physical_package_id(cpu);
2710 target = -1;
2711 for_each_online_cpu(i) {
2712 if (i == cpu)
2713 continue;
2714 if (phys_id == topology_physical_package_id(i)) {
2715 target = i;
2716 break;
2717 }
2718 }
2719
2720 /* migrate uncore events to the new cpu */
2721 if (target >= 0)
2722 cpumask_set_cpu(target, &uncore_cpu_mask);
2723
2724 uncore_change_context(msr_uncores, cpu, target);
14371cce 2725 uncore_change_context(pci_uncores, cpu, target);
087bfbb0
YZ
2726}
2727
2728static void __cpuinit uncore_event_init_cpu(int cpu)
2729{
2730 int i, phys_id;
2731
2732 phys_id = topology_physical_package_id(cpu);
2733 for_each_cpu(i, &uncore_cpu_mask) {
2734 if (phys_id == topology_physical_package_id(i))
2735 return;
2736 }
2737
2738 cpumask_set_cpu(cpu, &uncore_cpu_mask);
2739
2740 uncore_change_context(msr_uncores, -1, cpu);
14371cce 2741 uncore_change_context(pci_uncores, -1, cpu);
087bfbb0
YZ
2742}
2743
254298c7
YZ
2744static int
2745 __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
087bfbb0
YZ
2746{
2747 unsigned int cpu = (long)hcpu;
2748
2749 /* allocate/free data structure for uncore box */
2750 switch (action & ~CPU_TASKS_FROZEN) {
2751 case CPU_UP_PREPARE:
2752 uncore_cpu_prepare(cpu, -1);
2753 break;
2754 case CPU_STARTING:
2755 uncore_cpu_starting(cpu);
2756 break;
2757 case CPU_UP_CANCELED:
2758 case CPU_DYING:
2759 uncore_cpu_dying(cpu);
2760 break;
2761 default:
2762 break;
2763 }
2764
2765 /* select the cpu that collects uncore events */
2766 switch (action & ~CPU_TASKS_FROZEN) {
2767 case CPU_DOWN_FAILED:
2768 case CPU_STARTING:
2769 uncore_event_init_cpu(cpu);
2770 break;
2771 case CPU_DOWN_PREPARE:
2772 uncore_event_exit_cpu(cpu);
2773 break;
2774 default:
2775 break;
2776 }
2777
2778 return NOTIFY_OK;
2779}
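/*
 * The notifier runs in two phases on purpose: the first switch manages
 * the per-cpu box structures (allocate on UP_PREPARE, drop the
 * reference on DYING/UP_CANCELED), while the second picks or retires
 * the cpu that collects uncore events for the package via
 * uncore_event_init_cpu()/uncore_event_exit_cpu().
 */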
2780
2781static struct notifier_block uncore_cpu_nb __cpuinitdata = {
254298c7 2782 .notifier_call = uncore_cpu_notifier,
087bfbb0
YZ
2783 /*
2784 * to migrate uncore events, our notifier should be executed
2785 * before perf core's notifier.
2786 */
254298c7 2787 .priority = CPU_PRI_PERF + 1,
087bfbb0
YZ
2788};
2789
2790static void __init uncore_cpu_setup(void *dummy)
2791{
2792 uncore_cpu_starting(smp_processor_id());
2793}
2794
2795static int __init uncore_cpu_init(void)
2796{
42089697 2797 int ret, cpu, max_cores;
087bfbb0 2798
42089697 2799 max_cores = boot_cpu_data.x86_max_cores;
087bfbb0 2800 switch (boot_cpu_data.x86_model) {
fcde10e9
YZ
2801 case 26: /* Nehalem */
2802 case 30:
2803 case 37: /* Westmere */
2804 case 44:
2805 msr_uncores = nhm_msr_uncores;
2806 break;
2807 case 42: /* Sandy Bridge */
42089697
YZ
2808 if (snb_uncore_cbox.num_boxes > max_cores)
2809 snb_uncore_cbox.num_boxes = max_cores;
fcde10e9
YZ
2810 msr_uncores = snb_msr_uncores;
2811 break;
7c94ee2e 2812 case 45: /* Sandy Bridge-EP */
42089697
YZ
2813 if (snbep_uncore_cbox.num_boxes > max_cores)
2814 snbep_uncore_cbox.num_boxes = max_cores;
7c94ee2e
YZ
2815 msr_uncores = snbep_msr_uncores;
2816 break;
254298c7
YZ
2817 case 46: /* Nehalem-EX */
2818 msr_uncores = nhmex_msr_uncores;
2819 break;
087bfbb0
YZ
2820 default:
2821 return 0;
2822 }
2823
2824 ret = uncore_types_init(msr_uncores);
2825 if (ret)
2826 return ret;
2827
2828 get_online_cpus();
2829
2830 for_each_online_cpu(cpu) {
2831 int i, phys_id = topology_physical_package_id(cpu);
2832
2833 for_each_cpu(i, &uncore_cpu_mask) {
2834 if (phys_id == topology_physical_package_id(i)) {
2835 phys_id = -1;
2836 break;
2837 }
2838 }
2839 if (phys_id < 0)
2840 continue;
2841
2842 uncore_cpu_prepare(cpu, phys_id);
2843 uncore_event_init_cpu(cpu);
2844 }
2845 on_each_cpu(uncore_cpu_setup, NULL, 1);
2846
2847 register_cpu_notifier(&uncore_cpu_nb);
2848
2849 put_online_cpus();
2850
2851 return 0;
2852}
2853
2854static int __init uncore_pmus_register(void)
2855{
2856 struct intel_uncore_pmu *pmu;
2857 struct intel_uncore_type *type;
2858 int i, j;
2859
2860 for (i = 0; msr_uncores[i]; i++) {
2861 type = msr_uncores[i];
2862 for (j = 0; j < type->num_boxes; j++) {
2863 pmu = &type->pmus[j];
2864 uncore_pmu_register(pmu);
2865 }
2866 }
2867
14371cce
YZ
2868 for (i = 0; pci_uncores[i]; i++) {
2869 type = pci_uncores[i];
2870 for (j = 0; j < type->num_boxes; j++) {
2871 pmu = &type->pmus[j];
2872 uncore_pmu_register(pmu);
2873 }
2874 }
2875
087bfbb0
YZ
2876 return 0;
2877}
2878
2879static int __init intel_uncore_init(void)
2880{
2881 int ret;
2882
2883 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2884 return -ENODEV;
2885
14371cce 2886 ret = uncore_pci_init();
087bfbb0
YZ
2887 if (ret)
2888 goto fail;
14371cce
YZ
2889 ret = uncore_cpu_init();
2890 if (ret) {
2891 uncore_pci_exit();
2892 goto fail;
2893 }
087bfbb0
YZ
2894
2895 uncore_pmus_register();
2896 return 0;
2897fail:
2898 return ret;
2899}
2900device_initcall(intel_uncore_init);