1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
14371cce
YZ
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
899396cf
YZ
9static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
14371cce 11static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
    EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
    EVENT_CONSTRAINT(0, 0, 0);

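/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x, e.g.
 * __BITS_VALUE(val, 2, 6) yields bits 12-17.  It is used below to unpack
 * the per-field reference counts that are packed into a single atomic_t.
 */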
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                ((1ULL << (n)) - 1)))

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
static void uncore_pmu_event_read(struct perf_event *event);

static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
    return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

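/*
 * Find the box that services @cpu for this pmu.  The lookup is cached in
 * a per-cpu pointer; on a miss, pmu->box_list is scanned for the box
 * whose phys_id matches the cpu's physical package id, and a reference
 * is taken before the pointer is cached.
 */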
static struct intel_uncore_box *
uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
    struct intel_uncore_box *box;

    box = *per_cpu_ptr(pmu->box, cpu);
    if (box)
        return box;

    raw_spin_lock(&uncore_box_lock);
    list_for_each_entry(box, &pmu->box_list, list) {
        if (box->phys_id == topology_physical_package_id(cpu)) {
            atomic_inc(&box->refcnt);
            *per_cpu_ptr(pmu->box, cpu) = box;
            break;
        }
    }
    raw_spin_unlock(&uncore_box_lock);

    return *per_cpu_ptr(pmu->box, cpu);
}

static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
    /*
     * perf core schedules events on the basis of cpu; uncore events are
     * collected by one of the cpus inside a physical package.
     */
    return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
    u64 count;

    rdmsrl(event->hw.event_base, count);

    return count;
}

/*
 * Generic get constraint function for shared match/mask registers.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    struct intel_uncore_extra_reg *er;
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
    unsigned long flags;
    bool ok = false;

    /*
     * reg->alloc can be set due to existing state, so for fake box we
     * need to ignore this, otherwise we might fail to allocate proper
     * fake state for this extra reg constraint.
     */
    if (reg1->idx == EXTRA_REG_NONE ||
        (!uncore_box_is_fake(box) && reg1->alloc))
        return NULL;

    er = &box->shared_regs[reg1->idx];
    raw_spin_lock_irqsave(&er->lock, flags);
    if (!atomic_read(&er->ref) ||
        (er->config1 == reg1->config && er->config2 == reg2->config)) {
        atomic_inc(&er->ref);
        er->config1 = reg1->config;
        er->config2 = reg2->config;
        ok = true;
    }
    raw_spin_unlock_irqrestore(&er->lock, flags);

    if (ok) {
        if (!uncore_box_is_fake(box))
            reg1->alloc = 1;
        return NULL;
    }

    return &constraint_empty;
}

static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    struct intel_uncore_extra_reg *er;
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

    /*
     * Only put the constraint if the extra reg was actually allocated.
     * This also takes care of events which do not use an extra shared
     * reg.
     *
     * Also, if this is a fake box we shouldn't touch any event state
     * (reg->alloc) and we don't care about leaving inconsistent box
     * state either since it will be thrown out.
     */
    if (uncore_box_is_fake(box) || !reg1->alloc)
        return;

    er = &box->shared_regs[reg1->idx];
    atomic_dec(&er->ref);
    reg1->alloc = 0;
}

static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
    struct intel_uncore_extra_reg *er;
    unsigned long flags;
    u64 config;

    er = &box->shared_regs[idx];

    raw_spin_lock_irqsave(&er->lock, flags);
    config = er->config;
    raw_spin_unlock_irqrestore(&er->lock, flags);

    return config;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;

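/*
 * Disabling a box freezes its counters by setting the FRZ bit in the
 * per-box control register; enabling clears the bit again.  The
 * read-modify-write is skipped if the config space read fails.
 */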
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;
    int box_ctl = uncore_pci_box_ctl(box);
    u32 config = 0;

    if (!pci_read_config_dword(pdev, box_ctl, &config)) {
        config |= SNBEP_PMON_BOX_CTL_FRZ;
        pci_write_config_dword(pdev, box_ctl, config);
    }
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;
    int box_ctl = uncore_pci_box_ctl(box);
    u32 config = 0;

    if (!pci_read_config_dword(pdev, box_ctl, &config)) {
        config &= ~SNBEP_PMON_BOX_CTL_FRZ;
        pci_write_config_dword(pdev, box_ctl, config);
    }
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

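/*
 * PCI config space is accessed 32 bits at a time, so a counter wider
 * than 32 bits is assembled from two dword reads into the low and high
 * halves of count.
 */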
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;
    u64 count = 0;

    pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
    pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

    return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;

    pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
    u64 config;
    unsigned msr;

    msr = uncore_msr_box_ctl(box);
    if (msr) {
        rdmsrl(msr, config);
        config |= SNBEP_PMON_BOX_CTL_FRZ;
        wrmsrl(msr, config);
    }
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
    u64 config;
    unsigned msr;

    msr = uncore_msr_box_ctl(box);
    if (msr) {
        rdmsrl(msr, config);
        config &= ~SNBEP_PMON_BOX_CTL_FRZ;
        wrmsrl(msr, config);
    }
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

    if (reg1->idx != EXTRA_REG_NONE)
        wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

    wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                    struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
    unsigned msr = uncore_msr_box_ctl(box);

    if (msr)
        wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh8.attr,
    NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh5.attr,
    NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_tid_en.attr,
    &format_attr_inv.attr,
    &format_attr_thresh8.attr,
    &format_attr_filter_tid.attr,
    &format_attr_filter_nid.attr,
    &format_attr_filter_state.attr,
    &format_attr_filter_opc.attr,
    NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
    &format_attr_event_ext.attr,
    &format_attr_occ_sel.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh5.attr,
    &format_attr_occ_invert.attr,
    &format_attr_occ_edge.attr,
    &format_attr_filter_band0.attr,
    &format_attr_filter_band1.attr,
    &format_attr_filter_band2.attr,
    &format_attr_filter_band3.attr,
    NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
    &format_attr_event_ext.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh8.attr,
    &format_attr_match_rds.attr,
    &format_attr_match_rnid30.attr,
    &format_attr_match_rnid4.attr,
    &format_attr_match_dnid.attr,
    &format_attr_match_mc.attr,
    &format_attr_match_opc.attr,
    &format_attr_match_vnw.attr,
    &format_attr_match0.attr,
    &format_attr_match1.attr,
    &format_attr_mask_rds.attr,
    &format_attr_mask_rnid30.attr,
    &format_attr_mask_rnid4.attr,
    &format_attr_mask_dnid.attr,
    &format_attr_mask_mc.attr,
    &format_attr_mask_opc.attr,
    &format_attr_mask_vnw.attr,
    &format_attr_mask0.attr,
    &format_attr_mask1.attr,
    NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
    INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
    INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
    INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
    { /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
    INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
    INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
    INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
    INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
    { /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
    .name = "format",
    .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
    .name = "format",
    .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
    .name = "format",
    .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
    .name = "format",
    .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
    .name = "format",
    .attrs = snbep_uncore_qpi_formats_attr,
};

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
    .init_box = snbep_uncore_msr_init_box, \
    .disable_box = snbep_uncore_msr_disable_box, \
    .enable_box = snbep_uncore_msr_enable_box, \
    .disable_event = snbep_uncore_msr_disable_event, \
    .enable_event = snbep_uncore_msr_enable_event, \
    .read_counter = uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
    SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
    .init_box = snbep_uncore_pci_init_box, \
    .disable_box = snbep_uncore_pci_disable_box, \
    .enable_box = snbep_uncore_pci_enable_box, \
    .disable_event = snbep_uncore_pci_disable_event, \
    .read_counter = snbep_uncore_pci_read_counter

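/*
 * .enable_event is deliberately left out of the common PCI ops so that
 * types with extra match/mask state (e.g. the QPI boxes below) can
 * install their own enable_event callback.
 */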
static struct intel_uncore_ops snbep_uncore_pci_ops = {
    SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
    .enable_event = snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
    UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
    UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
    UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
    UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
    UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
    UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
    EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
    UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
    UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
    EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
    UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
    UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
    EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
    UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
    UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
    UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
    EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
    .name = "ubox",
    .num_counters = 2,
    .num_boxes = 1,
    .perf_ctr_bits = 44,
    .fixed_ctr_bits = 48,
    .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
    .event_ctl = SNBEP_U_MSR_PMON_CTL0,
    .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
    .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
    .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
    .ops = &snbep_uncore_msr_ops,
    .format_group = &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
    SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
    EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct intel_uncore_extra_reg *er = &box->shared_regs[0];
    int i;

    if (uncore_box_is_fake(box))
        return;

    for (i = 0; i < 5; i++) {
        if (reg1->alloc & (0x1 << i))
            atomic_sub(1 << (i * 6), &er->ref);
    }
    reg1->alloc = 0;
}

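/*
 * Up to five filter fields share one physical filter register per box.
 * er->ref packs a 6-bit reference count for each field; a field can be
 * claimed only if it is currently unused or already programmed with an
 * identical config.
 */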
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                u64 (*cbox_filter_mask)(int fields))
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct intel_uncore_extra_reg *er = &box->shared_regs[0];
    int i, alloc = 0;
    unsigned long flags;
    u64 mask;

    if (reg1->idx == EXTRA_REG_NONE)
        return NULL;

    raw_spin_lock_irqsave(&er->lock, flags);
    for (i = 0; i < 5; i++) {
        if (!(reg1->idx & (0x1 << i)))
            continue;
        if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
            continue;

        mask = cbox_filter_mask(0x1 << i);
        if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
            !((reg1->config ^ er->config) & mask)) {
            atomic_add(1 << (i * 6), &er->ref);
            er->config &= ~mask;
            er->config |= reg1->config & mask;
            alloc |= (0x1 << i);
        } else {
            break;
        }
    }
    raw_spin_unlock_irqrestore(&er->lock, flags);
    if (i < 5)
        goto fail;

    if (!uncore_box_is_fake(box))
        reg1->alloc |= alloc;

    return NULL;
fail:
    for (; i >= 0; i--) {
        if (alloc & (0x1 << i))
            atomic_sub(1 << (i * 6), &er->ref);
    }
    return &constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
    u64 mask = 0;

    if (fields & 0x1)
        mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
    if (fields & 0x2)
        mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
    if (fields & 0x4)
        mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
    if (fields & 0x8)
        mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

    return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct extra_reg *er;
    int idx = 0;

    for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
        if (er->event != (event->hw.config & er->config_mask))
            continue;
        idx |= er->idx;
    }

    if (idx) {
        reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
            SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
        reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
        reg1->idx = idx;
    }
    return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
    SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
    .hw_config = snbep_cbox_hw_config,
    .get_constraint = snbep_cbox_get_constraint,
    .put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
    .name = "cbox",
    .num_counters = 4,
    .num_boxes = 8,
    .perf_ctr_bits = 44,
    .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
    .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
    .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
    .msr_offset = SNBEP_CBO_MSR_OFFSET,
    .num_shared_regs = 1,
    .constraints = snbep_uncore_cbox_constraints,
    .ops = &snbep_uncore_cbox_ops,
    .format_group = &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
    u64 config = reg1->config;

    if (new_idx > reg1->idx)
        config <<= 8 * (new_idx - reg1->idx);
    else
        config >>= 8 * (reg1->idx - new_idx);

    if (modify) {
        hwc->config += new_idx - reg1->idx;
        reg1->config = config;
        reg1->idx = new_idx;
    }
    return config;
}

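/*
 * The PCU filter register holds four 8-bit occupancy bands.  If the band
 * selected by the event is already taken with a different config, the
 * event is rotated to the next band (snbep_pcu_alter_er() shifts the
 * filter value and adjusts the event select accordingly) before giving
 * up and returning the empty constraint.
 */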
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct intel_uncore_extra_reg *er = &box->shared_regs[0];
    unsigned long flags;
    int idx = reg1->idx;
    u64 mask, config1 = reg1->config;
    bool ok = false;

    if (reg1->idx == EXTRA_REG_NONE ||
        (!uncore_box_is_fake(box) && reg1->alloc))
        return NULL;
again:
    mask = 0xffULL << (idx * 8);
    raw_spin_lock_irqsave(&er->lock, flags);
    if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
        !((config1 ^ er->config) & mask)) {
        atomic_add(1 << (idx * 8), &er->ref);
        er->config &= ~mask;
        er->config |= config1 & mask;
        ok = true;
    }
    raw_spin_unlock_irqrestore(&er->lock, flags);

    if (!ok) {
        idx = (idx + 1) % 4;
        if (idx != reg1->idx) {
            config1 = snbep_pcu_alter_er(event, idx, false);
            goto again;
        }
        return &constraint_empty;
    }

    if (!uncore_box_is_fake(box)) {
        if (idx != reg1->idx)
            snbep_pcu_alter_er(event, idx, true);
        reg1->alloc = 1;
    }
    return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct intel_uncore_extra_reg *er = &box->shared_regs[0];

    if (uncore_box_is_fake(box) || !reg1->alloc)
        return;

    atomic_sub(1 << (reg1->idx * 8), &er->ref);
    reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
    int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

    if (ev_sel >= 0xb && ev_sel <= 0xe) {
        reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
        reg1->idx = ev_sel - 0xb;
        reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
    }
    return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
    SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
    .hw_config = snbep_pcu_hw_config,
    .get_constraint = snbep_pcu_get_constraint,
    .put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
    .name = "pcu",
    .num_counters = 4,
    .num_boxes = 1,
    .perf_ctr_bits = 48,
    .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
    .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
    .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
    .num_shared_regs = 1,
    .ops = &snbep_uncore_pcu_ops,
    .format_group = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
    &snbep_uncore_ubox,
    &snbep_uncore_cbox,
    &snbep_uncore_pcu,
    NULL,
};

enum {
    SNBEP_PCI_QPI_PORT0_FILTER,
    SNBEP_PCI_QPI_PORT1_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
    struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

    if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
        reg1->idx = 0;
        reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
        reg1->config = event->attr.config1;
        reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
        reg2->config = event->attr.config2;
    }
    return 0;
}

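/*
 * The QPI match/mask registers live on a separate "filter" PCI device
 * that is discovered at probe time and stashed in extra_pci_dev[].
 * Each 64-bit match/mask value is written as two 32-bit config dwords.
 */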
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
    struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

    if (reg1->idx != EXTRA_REG_NONE) {
        int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
        struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
        WARN_ON_ONCE(!filter_pdev);
        if (filter_pdev) {
            pci_write_config_dword(filter_pdev, reg1->reg,
                           (u32)reg1->config);
            pci_write_config_dword(filter_pdev, reg1->reg + 4,
                           (u32)(reg1->config >> 32));
            pci_write_config_dword(filter_pdev, reg2->reg,
                           (u32)reg2->config);
            pci_write_config_dword(filter_pdev, reg2->reg + 4,
                           (u32)(reg2->config >> 32));
        }
    }

    pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
    SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
    .enable_event = snbep_qpi_enable_event,
    .hw_config = snbep_qpi_hw_config,
    .get_constraint = uncore_get_constraint,
    .put_constraint = uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT() \
    .perf_ctr = SNBEP_PCI_PMON_CTR0, \
    .event_ctl = SNBEP_PCI_PMON_CTL0, \
    .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
    .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
    .ops = &snbep_uncore_pci_ops, \
    .format_group = &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
    .name = "ha",
    .num_counters = 4,
    .num_boxes = 1,
    .perf_ctr_bits = 48,
    SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
    .name = "imc",
    .num_counters = 4,
    .num_boxes = 4,
    .perf_ctr_bits = 48,
    .fixed_ctr_bits = 48,
    .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
    .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
    .event_descs = snbep_uncore_imc_events,
    SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
    .name = "qpi",
    .num_counters = 4,
    .num_boxes = 2,
    .perf_ctr_bits = 48,
    .perf_ctr = SNBEP_PCI_PMON_CTR0,
    .event_ctl = SNBEP_PCI_PMON_CTL0,
    .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
    .num_shared_regs = 1,
    .ops = &snbep_uncore_qpi_ops,
    .event_descs = snbep_uncore_qpi_events,
    .format_group = &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
    .name = "r2pcie",
    .num_counters = 4,
    .num_boxes = 1,
    .perf_ctr_bits = 44,
    .constraints = snbep_uncore_r2pcie_constraints,
    SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
    .name = "r3qpi",
    .num_counters = 3,
    .num_boxes = 2,
    .perf_ctr_bits = 44,
    .constraints = snbep_uncore_r3qpi_constraints,
    SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
    SNBEP_PCI_UNCORE_HA,
    SNBEP_PCI_UNCORE_IMC,
    SNBEP_PCI_UNCORE_QPI,
    SNBEP_PCI_UNCORE_R2PCIE,
    SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
    [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
    [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
    [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
    [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
    [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
    NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
    { /* Home Agent */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
    },
    { /* MC Channel 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
    },
    { /* MC Channel 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
    },
    { /* MC Channel 2 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
    },
    { /* MC Channel 3 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
    },
    { /* QPI Port 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
    },
    { /* QPI Port 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
    },
    { /* R2PCIe */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
    },
    { /* R3QPI Link 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
    },
    { /* R3QPI Link 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
        .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
    },
    { /* QPI Port 0 filter */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
        .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                           SNBEP_PCI_QPI_PORT0_FILTER),
    },
    { /* QPI Port 1 filter */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
        .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                           SNBEP_PCI_QPI_PORT1_FILTER),
    },
    { /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
    .name = "snbep_uncore",
    .id_table = snbep_uncore_pci_ids,
};

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid)
{
    struct pci_dev *ubox_dev = NULL;
    int i, bus, nodeid;
    int err = 0;
    u32 config = 0;

    while (1) {
        /* find the UBOX device */
        ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
        if (!ubox_dev)
            break;
        bus = ubox_dev->bus->number;
        /* get the Node ID of the local register */
        err = pci_read_config_dword(ubox_dev, 0x40, &config);
        if (err)
            break;
        nodeid = config;
        /* get the Node ID mapping */
        err = pci_read_config_dword(ubox_dev, 0x54, &config);
        if (err)
            break;
        /*
         * every three bits in the Node ID mapping register map
         * to a particular node.
         */
        for (i = 0; i < 8; i++) {
            if (nodeid == ((config >> (3 * i)) & 0x7)) {
                pcibus_to_physid[bus] = i;
                break;
            }
        }
    }

    if (!err) {
        /*
         * For PCI buses with no UBOX device, find the next bus
         * that has a UBOX device and use its mapping.
         */
        i = -1;
        for (bus = 255; bus >= 0; bus--) {
            if (pcibus_to_physid[bus] >= 0)
                i = pcibus_to_physid[bus];
            else
                pcibus_to_physid[bus] = i;
        }
    }

    if (ubox_dev)
        pci_dev_put(ubox_dev);

    return err ? pcibios_err_to_errno(err) : 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
{
    unsigned msr = uncore_msr_box_ctl(box);

    if (msr)
        wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
}

static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
{
    struct pci_dev *pdev = box->pci_dev;

    pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
}

#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
    .init_box = ivt_uncore_msr_init_box, \
    .disable_box = snbep_uncore_msr_disable_box, \
    .enable_box = snbep_uncore_msr_enable_box, \
    .disable_event = snbep_uncore_msr_disable_event, \
    .enable_event = snbep_uncore_msr_enable_event, \
    .read_counter = uncore_msr_read_counter

static struct intel_uncore_ops ivt_uncore_msr_ops = {
    IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivt_uncore_pci_ops = {
    .init_box = ivt_uncore_pci_init_box,
    .disable_box = snbep_uncore_pci_disable_box,
    .enable_box = snbep_uncore_pci_enable_box,
    .disable_event = snbep_uncore_pci_disable_event,
    .enable_event = snbep_uncore_pci_enable_event,
    .read_counter = snbep_uncore_pci_read_counter,
};

#define IVT_UNCORE_PCI_COMMON_INIT() \
    .perf_ctr = SNBEP_PCI_PMON_CTR0, \
    .event_ctl = SNBEP_PCI_PMON_CTL0, \
    .event_mask = IVT_PMON_RAW_EVENT_MASK, \
    .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
    .ops = &ivt_uncore_pci_ops, \
    .format_group = &ivt_uncore_format_group

static struct attribute *ivt_uncore_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh8.attr,
    NULL,
};

static struct attribute *ivt_uncore_ubox_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh5.attr,
    NULL,
};

static struct attribute *ivt_uncore_cbox_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_tid_en.attr,
    &format_attr_thresh8.attr,
    &format_attr_filter_tid.attr,
    &format_attr_filter_link.attr,
    &format_attr_filter_state2.attr,
    &format_attr_filter_nid2.attr,
    &format_attr_filter_opc2.attr,
    NULL,
};

static struct attribute *ivt_uncore_pcu_formats_attr[] = {
    &format_attr_event_ext.attr,
    &format_attr_occ_sel.attr,
    &format_attr_edge.attr,
    &format_attr_thresh5.attr,
    &format_attr_occ_invert.attr,
    &format_attr_occ_edge.attr,
    &format_attr_filter_band0.attr,
    &format_attr_filter_band1.attr,
    &format_attr_filter_band2.attr,
    &format_attr_filter_band3.attr,
    NULL,
};

static struct attribute *ivt_uncore_qpi_formats_attr[] = {
    &format_attr_event_ext.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_thresh8.attr,
    &format_attr_match_rds.attr,
    &format_attr_match_rnid30.attr,
    &format_attr_match_rnid4.attr,
    &format_attr_match_dnid.attr,
    &format_attr_match_mc.attr,
    &format_attr_match_opc.attr,
    &format_attr_match_vnw.attr,
    &format_attr_match0.attr,
    &format_attr_match1.attr,
    &format_attr_mask_rds.attr,
    &format_attr_mask_rnid30.attr,
    &format_attr_mask_rnid4.attr,
    &format_attr_mask_dnid.attr,
    &format_attr_mask_mc.attr,
    &format_attr_mask_opc.attr,
    &format_attr_mask_vnw.attr,
    &format_attr_mask0.attr,
    &format_attr_mask1.attr,
    NULL,
};

static struct attribute_group ivt_uncore_format_group = {
    .name = "format",
    .attrs = ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
    .name = "format",
    .attrs = ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
    .name = "format",
    .attrs = ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
    .name = "format",
    .attrs = ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
    .name = "format",
    .attrs = ivt_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivt_uncore_ubox = {
    .name = "ubox",
    .num_counters = 2,
    .num_boxes = 1,
    .perf_ctr_bits = 44,
    .fixed_ctr_bits = 48,
    .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
    .event_ctl = SNBEP_U_MSR_PMON_CTL0,
    .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
    .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
    .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
    .ops = &ivt_uncore_msr_ops,
    .format_group = &ivt_uncore_ubox_format_group,
};

static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
    SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
    SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
    SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
    SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
    SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
    EVENT_EXTRA_END
};

static u64 ivt_cbox_filter_mask(int fields)
{
    u64 mask = 0;

    if (fields & 0x1)
        mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
    if (fields & 0x2)
        mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
    if (fields & 0x4)
        mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
    if (fields & 0x8)
        mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
    if (fields & 0x10)
        mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

    return mask;
}

static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
    return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}

static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    struct extra_reg *er;
    int idx = 0;

    for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
        if (er->event != (event->hw.config & er->config_mask))
            continue;
        idx |= er->idx;
    }

    if (idx) {
        reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
            SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
        reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
        reg1->idx = idx;
    }
    return 0;
}

static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

    if (reg1->idx != EXTRA_REG_NONE) {
        u64 filter = uncore_shared_reg_config(box, 0);
        wrmsrl(reg1->reg, filter & 0xffffffff);
        wrmsrl(reg1->reg + 6, filter >> 32);
    }

    wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivt_uncore_cbox_ops = {
    .init_box = ivt_uncore_msr_init_box,
    .disable_box = snbep_uncore_msr_disable_box,
    .enable_box = snbep_uncore_msr_enable_box,
    .disable_event = snbep_uncore_msr_disable_event,
    .enable_event = ivt_cbox_enable_event,
    .read_counter = uncore_msr_read_counter,
    .hw_config = ivt_cbox_hw_config,
    .get_constraint = ivt_cbox_get_constraint,
    .put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivt_uncore_cbox = {
    .name = "cbox",
    .num_counters = 4,
    .num_boxes = 15,
    .perf_ctr_bits = 44,
    .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
    .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
    .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
    .msr_offset = SNBEP_CBO_MSR_OFFSET,
    .num_shared_regs = 1,
    .constraints = snbep_uncore_cbox_constraints,
    .ops = &ivt_uncore_cbox_ops,
    .format_group = &ivt_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivt_uncore_pcu_ops = {
    IVT_UNCORE_MSR_OPS_COMMON_INIT(),
    .hw_config = snbep_pcu_hw_config,
    .get_constraint = snbep_pcu_get_constraint,
    .put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivt_uncore_pcu = {
    .name = "pcu",
    .num_counters = 4,
    .num_boxes = 1,
    .perf_ctr_bits = 48,
    .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
    .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
    .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
    .num_shared_regs = 1,
    .ops = &ivt_uncore_pcu_ops,
    .format_group = &ivt_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivt_msr_uncores[] = {
    &ivt_uncore_ubox,
    &ivt_uncore_cbox,
    &ivt_uncore_pcu,
    NULL,
};

static struct intel_uncore_type ivt_uncore_ha = {
    .name = "ha",
    .num_counters = 4,
    .num_boxes = 2,
    .perf_ctr_bits = 48,
    IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_imc = {
    .name = "imc",
    .num_counters = 4,
    .num_boxes = 8,
    .perf_ctr_bits = 48,
    .fixed_ctr_bits = 48,
    .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
    .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
    IVT_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};

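/*
 * Because of the irregular spacing, the IRP callbacks below index these
 * offset tables with hwc->idx instead of deriving the register address
 * from config_base/event_base arithmetic.
 */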
static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
                   hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;
    u64 count = 0;

    pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
    pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

    return count;
}

static struct intel_uncore_ops ivt_uncore_irp_ops = {
    .init_box = ivt_uncore_pci_init_box,
    .disable_box = snbep_uncore_pci_disable_box,
    .enable_box = snbep_uncore_pci_enable_box,
    .disable_event = ivt_uncore_irp_disable_event,
    .enable_event = ivt_uncore_irp_enable_event,
    .read_counter = ivt_uncore_irp_read_counter,
};

static struct intel_uncore_type ivt_uncore_irp = {
    .name = "irp",
    .num_counters = 4,
    .num_boxes = 1,
    .perf_ctr_bits = 48,
    .event_mask = IVT_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
    .ops = &ivt_uncore_irp_ops,
    .format_group = &ivt_uncore_format_group,
};

static struct intel_uncore_ops ivt_uncore_qpi_ops = {
    .init_box = ivt_uncore_pci_init_box,
    .disable_box = snbep_uncore_pci_disable_box,
    .enable_box = snbep_uncore_pci_enable_box,
    .disable_event = snbep_uncore_pci_disable_event,
    .enable_event = snbep_qpi_enable_event,
    .read_counter = snbep_uncore_pci_read_counter,
    .hw_config = snbep_qpi_hw_config,
    .get_constraint = uncore_get_constraint,
    .put_constraint = uncore_put_constraint,
};

static struct intel_uncore_type ivt_uncore_qpi = {
    .name = "qpi",
    .num_counters = 4,
    .num_boxes = 3,
    .perf_ctr_bits = 48,
    .perf_ctr = SNBEP_PCI_PMON_CTR0,
    .event_ctl = SNBEP_PCI_PMON_CTL0,
    .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
    .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
    .num_shared_regs = 1,
    .ops = &ivt_uncore_qpi_ops,
    .format_group = &ivt_uncore_qpi_format_group,
};

static struct intel_uncore_type ivt_uncore_r2pcie = {
    .name = "r2pcie",
    .num_counters = 4,
    .num_boxes = 1,
    .perf_ctr_bits = 44,
    .constraints = snbep_uncore_r2pcie_constraints,
    IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_r3qpi = {
    .name = "r3qpi",
    .num_counters = 3,
    .num_boxes = 2,
    .perf_ctr_bits = 44,
    .constraints = snbep_uncore_r3qpi_constraints,
    IVT_UNCORE_PCI_COMMON_INIT(),
};

enum {
    IVT_PCI_UNCORE_HA,
    IVT_PCI_UNCORE_IMC,
    IVT_PCI_UNCORE_IRP,
    IVT_PCI_UNCORE_QPI,
    IVT_PCI_UNCORE_R2PCIE,
    IVT_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivt_pci_uncores[] = {
    [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
    [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
    [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp,
    [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
    [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
    [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
    NULL,
};

static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
    { /* Home Agent 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
    },
    { /* Home Agent 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
    },
    { /* MC0 Channel 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
    },
    { /* MC0 Channel 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
    },
    { /* MC0 Channel 3 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
    },
    { /* MC0 Channel 4 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
    },
    { /* MC1 Channel 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
    },
    { /* MC1 Channel 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
    },
    { /* MC1 Channel 3 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
    },
    { /* MC1 Channel 4 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
    },
    { /* IRP */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
    },
    { /* QPI0 Port 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
    },
    { /* QPI0 Port 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
    },
    { /* QPI1 Port 2 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
    },
    { /* R2PCIe */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
    },
    { /* R3QPI0 Link 0 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
    },
    { /* R3QPI0 Link 1 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
    },
    { /* R3QPI1 Link 2 */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
        .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
    },
    { /* QPI Port 0 filter */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
        .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                           SNBEP_PCI_QPI_PORT0_FILTER),
    },
    { /* QPI Port 1 filter */
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
        .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                           SNBEP_PCI_QPI_PORT1_FILTER),
    },
    { /* end: all zeroes */ }
};

static struct pci_driver ivt_uncore_pci_driver = {
    .name = "ivt_uncore",
    .id_table = ivt_uncore_pci_ids,
};
/* end of IvyTown uncore support */

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    if (hwc->idx < UNCORE_PMC_IDX_FIXED)
        wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
    else
        wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
    wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
    if (box->pmu->pmu_idx == 0) {
        wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
               SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
    }
}

static struct uncore_event_desc snb_uncore_events[] = {
    INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
    { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_cmask5.attr,
    NULL,
};

static struct attribute_group snb_uncore_format_group = {
    .name = "format",
    .attrs = snb_uncore_formats_attr,
};

1647static struct intel_uncore_ops snb_uncore_msr_ops = {
1648 .init_box = snb_uncore_msr_init_box,
1649 .disable_event = snb_uncore_msr_disable_event,
1650 .enable_event = snb_uncore_msr_enable_event,
1651 .read_counter = uncore_msr_read_counter,
1652};
1653
1654static struct event_constraint snb_uncore_cbox_constraints[] = {
1655 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1656 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1657 EVENT_CONSTRAINT_END
1658};
1659
1660static struct intel_uncore_type snb_uncore_cbox = {
1661 .name = "cbox",
1662 .num_counters = 2,
1663 .num_boxes = 4,
1664 .perf_ctr_bits = 44,
1665 .fixed_ctr_bits = 48,
1666 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
1667 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
1668 .fixed_ctr = SNB_UNC_FIXED_CTR,
1669 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
1670 .single_fixed = 1,
1671 .event_mask = SNB_UNC_RAW_EVENT_MASK,
1672 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
1673 .constraints = snb_uncore_cbox_constraints,
1674 .ops = &snb_uncore_msr_ops,
1675 .format_group = &snb_uncore_format_group,
35534b20 1676 .event_descs = snb_uncore_events,
1677};
1678
1679static struct intel_uncore_type *snb_msr_uncores[] = {
1680 &snb_uncore_cbox,
1681 NULL,
1682};
1683
1684enum {
1685 SNB_PCI_UNCORE_IMC,
1686};
1687
1688static struct uncore_event_desc snb_uncore_imc_events[] = {
1689 INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
1690 INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
1691 INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
1692
1693 INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
1694 INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
1695 INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
1696
1697 { /* end: all zeroes */ },
1698};
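/*
 * The scale above is 64 / 2^20 = 6.103515625e-5: each raw count
 * presumably corresponds to one 64-byte cache line, so this factor
 * converts counts to the MiB advertised by the .unit attribute.
 */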
1699
1700#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
1701#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48
1702
1703/* page size multiple covering all config regs */
1704#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000
1705
1706#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
1707#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
1708#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
1709#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
1710#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
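/*
 * Assumed MMIO layout, per the defines above: the BAR read from PCI
 * config offset 0x48 exposes free-running counters, with data_reads
 * at BAR + 0x5050 and data_writes at BAR + 0x5054; the 0x6000 map
 * size is a page-size multiple that covers all of the config regs.
 */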
1711
1712static struct attribute *snb_uncore_imc_formats_attr[] = {
1713 &format_attr_event.attr,
1714 NULL,
1715};
1716
1717static struct attribute_group snb_uncore_imc_format_group = {
1718 .name = "format",
1719 .attrs = snb_uncore_imc_formats_attr,
1720};
1721
1722static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
1723{
1724 struct pci_dev *pdev = box->pci_dev;
4191c29f 1725 int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
b9e1ab6d 1726 resource_size_t addr;
4191c29f 1727 u32 pci_dword;
b9e1ab6d 1728
1729 pci_read_config_dword(pdev, where, &pci_dword);
1730 addr = pci_dword;
1731
1732#ifdef CONFIG_PHYS_ADDR_T_64BIT
1733 pci_read_config_dword(pdev, where + 4, &pci_dword);
1734 addr |= ((resource_size_t)pci_dword << 32);
1735#endif
1736
1737 addr &= ~(PAGE_SIZE - 1);
1738
1739 box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
ced2efb0 1740 box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
1741}
1742
1743static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
1744{}
1745
1746static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
1747{}
1748
1749static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1750{}
1751
1752static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1753{}
1754
1755static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1756{
1757 struct hw_perf_event *hwc = &event->hw;
1758
1759 return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
1760}
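/*
 * A plain 32-bit load from the ioremapped BAR is enough: the IMC
 * counters are free running, which is also why the enable/disable
 * box and event callbacks above are intentionally empty.
 */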
1761
1762/*
1763 * Custom event_init() function: we define our own fixed, free-running
1764 * counters, so we do not want to conflict with the generic uncore
1765 * logic. This also simplifies processing.
1766 */
1767static int snb_uncore_imc_event_init(struct perf_event *event)
1768{
1769 struct intel_uncore_pmu *pmu;
1770 struct intel_uncore_box *box;
1771 struct hw_perf_event *hwc = &event->hw;
1772 u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
1773 int idx, base;
1774
1775 if (event->attr.type != event->pmu->type)
1776 return -ENOENT;
1777
1778 pmu = uncore_event_to_pmu(event);
1779 /* no device found for this pmu */
1780 if (pmu->func_id < 0)
1781 return -ENOENT;
1782
1783 /* Sampling not supported yet */
1784 if (hwc->sample_period)
1785 return -EINVAL;
1786
1787 /* unsupported modes and filters */
1788 if (event->attr.exclude_user ||
1789 event->attr.exclude_kernel ||
1790 event->attr.exclude_hv ||
1791 event->attr.exclude_idle ||
1792 event->attr.exclude_host ||
1793 event->attr.exclude_guest ||
1794 event->attr.sample_period) /* no sampling */
1795 return -EINVAL;
1796
1797 /*
1798 * Place all uncore events for a particular physical package
1799 * onto a single cpu
1800 */
1801 if (event->cpu < 0)
1802 return -EINVAL;
1803
1804 /* check only supported bits are set */
1805 if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
1806 return -EINVAL;
1807
1808 box = uncore_pmu_to_box(pmu, event->cpu);
1809 if (!box || box->cpu < 0)
1810 return -EINVAL;
1811
1812 event->cpu = box->cpu;
1813
1814 event->hw.idx = -1;
1815 event->hw.last_tag = ~0ULL;
1816 event->hw.extra_reg.idx = EXTRA_REG_NONE;
1817 event->hw.branch_reg.idx = EXTRA_REG_NONE;
1818 /*
1819 * check event is known (whitelist, determines counter)
1820 */
1821 switch (cfg) {
1822 case SNB_UNCORE_PCI_IMC_DATA_READS:
1823 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
1824 idx = UNCORE_PMC_IDX_FIXED;
1825 break;
1826 case SNB_UNCORE_PCI_IMC_DATA_WRITES:
1827 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
1828 idx = UNCORE_PMC_IDX_FIXED + 1;
1829 break;
1830 default:
1831 return -EINVAL;
1832 }
1833
1834 /* must be done before validate_group */
1835 event->hw.event_base = base;
1836 event->hw.config = cfg;
1837 event->hw.idx = idx;
1838
1839 /* no group validation needed, we have free running counters */
1840
1841 return 0;
1842}
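/*
 * Hypothetical usage, assuming the PMU is registered as "uncore_imc":
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 *
 * Counts are then scaled to MiB via the event descriptors above.
 */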
1843
1844static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1845{
1846 return 0;
1847}
1848
1849static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
1850{
1851 struct intel_uncore_box *box = uncore_event_to_box(event);
1852 u64 count;
1853
1854 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1855 return;
1856
1857 event->hw.state = 0;
1858 box->n_active++;
1859
1860 list_add_tail(&event->active_entry, &box->active_list);
1861
1862 count = snb_uncore_imc_read_counter(box, event);
1863 local64_set(&event->hw.prev_count, count);
1864
1865 if (box->n_active == 1)
1866 uncore_pmu_start_hrtimer(box);
1867}
1868
1869static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
1870{
1871 struct intel_uncore_box *box = uncore_event_to_box(event);
1872 struct hw_perf_event *hwc = &event->hw;
1873
1874 if (!(hwc->state & PERF_HES_STOPPED)) {
1875 box->n_active--;
1876
1877 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1878 hwc->state |= PERF_HES_STOPPED;
1879
1880 list_del(&event->active_entry);
1881
1882 if (box->n_active == 0)
1883 uncore_pmu_cancel_hrtimer(box);
1884 }
1885
1886 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1887 /*
1888 * Drain the remaining delta count out of an event
1889 * that we are disabling:
1890 */
1891 uncore_perf_event_update(box, event);
1892 hwc->state |= PERF_HES_UPTODATE;
1893 }
1894}
1895
1896static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
1897{
1898 struct intel_uncore_box *box = uncore_event_to_box(event);
1899 struct hw_perf_event *hwc = &event->hw;
1900
1901 if (!box)
1902 return -ENODEV;
1903
1904 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1905 if (!(flags & PERF_EF_START))
1906 hwc->state |= PERF_HES_ARCH;
1907
1908 snb_uncore_imc_event_start(event, 0);
1909
1910 box->n_events++;
1911
1912 return 0;
1913}
1914
1915static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
1916{
1917 struct intel_uncore_box *box = uncore_event_to_box(event);
1918 int i;
1919
1920 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
1921
1922 for (i = 0; i < box->n_events; i++) {
1923 if (event == box->event_list[i]) {
1924 --box->n_events;
1925 break;
1926 }
1927 }
1928}
1929
1930static int snb_pci2phy_map_init(int devid)
1931{
1932 struct pci_dev *dev = NULL;
1933 int bus;
1934
1935 dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
1936 if (!dev)
1937 return -ENOTTY;
1938
1939 bus = dev->bus->number;
1940
1941 pcibus_to_physid[bus] = 0;
1942
1943 pci_dev_put(dev);
1944
1945 return 0;
1946}
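/*
 * Client Sandy Bridge parts are assumed to be single-socket here, so
 * the one bus hosting the IMC device is mapped to physical id 0.
 */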
1947
1948static struct pmu snb_uncore_imc_pmu = {
1949 .task_ctx_nr = perf_invalid_context,
1950 .event_init = snb_uncore_imc_event_init,
1951 .add = snb_uncore_imc_event_add,
1952 .del = snb_uncore_imc_event_del,
1953 .start = snb_uncore_imc_event_start,
1954 .stop = snb_uncore_imc_event_stop,
1955 .read = uncore_pmu_event_read,
1956};
1957
1958static struct intel_uncore_ops snb_uncore_imc_ops = {
1959 .init_box = snb_uncore_imc_init_box,
1960 .enable_box = snb_uncore_imc_enable_box,
1961 .disable_box = snb_uncore_imc_disable_box,
1962 .disable_event = snb_uncore_imc_disable_event,
1963 .enable_event = snb_uncore_imc_enable_event,
1964 .hw_config = snb_uncore_imc_hw_config,
1965 .read_counter = snb_uncore_imc_read_counter,
1966};
1967
1968static struct intel_uncore_type snb_uncore_imc = {
1969 .name = "imc",
1970 .num_counters = 2,
1971 .num_boxes = 1,
1972 .fixed_ctr_bits = 32,
1973 .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
1974 .event_descs = snb_uncore_imc_events,
1975 .format_group = &snb_uncore_imc_format_group,
1976 .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
1977 .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
1978 .ops = &snb_uncore_imc_ops,
1979 .pmu = &snb_uncore_imc_pmu,
1980};
1981
1982static struct intel_uncore_type *snb_pci_uncores[] = {
1983 [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
1984 NULL,
1985};
1986
1987static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
1988 { /* IMC */
1989 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
1990 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1991 },
81827ed8 1992 { /* end: all zeroes */ },
1993};
1994
1995static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
1996 { /* IMC */
1997 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
1998 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1999 },
81827ed8 2000 { /* end: all zeroes */ },
2001};
2002
2003static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
2004 { /* IMC */
2005 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
2006 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
2007 },
81827ed8 2008 { /* end: all zeroes */ },
2009};
2010
2011static struct pci_driver snb_uncore_pci_driver = {
2012 .name = "snb_uncore",
2013 .id_table = snb_uncore_pci_ids,
2014};
2015
2016static struct pci_driver ivb_uncore_pci_driver = {
2017 .name = "ivb_uncore",
2018 .id_table = ivb_uncore_pci_ids,
2019};
2020
2021static struct pci_driver hsw_uncore_pci_driver = {
2022 .name = "hsw_uncore",
2023 .id_table = hsw_uncore_pci_ids,
2024};
2025
2026/* end of Sandy Bridge uncore support */
2027
2028/* Nehalem uncore support */
2029static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
2030{
2031 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
2032}
2033
2034static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
2035{
2036 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
2037}
2038
2039static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2040{
2041 struct hw_perf_event *hwc = &event->hw;
2042
2043 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
2044 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
2045 else
2046 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
2047}
2048
2049static struct attribute *nhm_uncore_formats_attr[] = {
2050 &format_attr_event.attr,
2051 &format_attr_umask.attr,
2052 &format_attr_edge.attr,
2053 &format_attr_inv.attr,
2054 &format_attr_cmask8.attr,
2055 NULL,
2056};
2057
2058static struct attribute_group nhm_uncore_format_group = {
2059 .name = "format",
2060 .attrs = nhm_uncore_formats_attr,
2061};
2062
2063static struct uncore_event_desc nhm_uncore_events[] = {
2064 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
2065 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
2066 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
2067 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
2068 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
2069 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
2070 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
2071 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
2072 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
2073 { /* end: all zeroes */ },
2074};
2075
2076static struct intel_uncore_ops nhm_uncore_msr_ops = {
2077 .disable_box = nhm_uncore_msr_disable_box,
2078 .enable_box = nhm_uncore_msr_enable_box,
2079 .disable_event = snb_uncore_msr_disable_event,
2080 .enable_event = nhm_uncore_msr_enable_event,
2081 .read_counter = uncore_msr_read_counter,
2082};
2083
2084static struct intel_uncore_type nhm_uncore = {
2085 .name = "",
2086 .num_counters = 8,
2087 .num_boxes = 1,
2088 .perf_ctr_bits = 48,
2089 .fixed_ctr_bits = 48,
2090 .event_ctl = NHM_UNC_PERFEVTSEL0,
2091 .perf_ctr = NHM_UNC_UNCORE_PMC0,
2092 .fixed_ctr = NHM_UNC_FIXED_CTR,
2093 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
2094 .event_mask = NHM_UNC_RAW_EVENT_MASK,
2095 .event_descs = nhm_uncore_events,
2096 .ops = &nhm_uncore_msr_ops,
2097 .format_group = &nhm_uncore_format_group,
2098};
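/*
 * Note the empty .name: for a single-box type with no name, the
 * registration code is expected to expose this PMU as plain "uncore"
 * in sysfs rather than "uncore_<name>".
 */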
2099
2100static struct intel_uncore_type *nhm_msr_uncores[] = {
2101 &nhm_uncore,
2102 NULL,
2103};
2104/* end of Nehalem uncore support */
2105
2106/* Nehalem-EX uncore support */
2107DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
2108DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
2109DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
2110DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
2111
2112static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
2113{
2114 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
2115}
2116
2117static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
2118{
2119 unsigned msr = uncore_msr_box_ctl(box);
2120 u64 config;
2121
2122 if (msr) {
2123 rdmsrl(msr, config);
2124 config &= ~((1ULL << uncore_num_counters(box)) - 1);
2125 /* WBox has a fixed counter */
2126 if (uncore_msr_fixed_ctl(box))
2127 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
2128 wrmsrl(msr, config);
2129 }
2130}
2131
2132static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
2133{
2134 unsigned msr = uncore_msr_box_ctl(box);
2135 u64 config;
2136
2137 if (msr) {
2138 rdmsrl(msr, config);
2139 config |= (1ULL << uncore_num_counters(box)) - 1;
2140 /* WBox has a fixed counter */
2141 if (uncore_msr_fixed_ctl(box))
2142 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
2143 wrmsrl(msr, config);
2144 }
2145}
2146
2147static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
2148{
2149 wrmsrl(event->hw.config_base, 0);
2150}
2151
2152static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2153{
2154 struct hw_perf_event *hwc = &event->hw;
2155
2156 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
2157 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
2158 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
2159 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2160 else
2161 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2162}
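/*
 * Enable-bit convention assumed above: box types whose raw event mask
 * already claims bit 0 as a config bit use bit 22 as the enable bit;
 * for all others, bit 0 itself enables the counter.
 */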
2163
2164#define NHMEX_UNCORE_OPS_COMMON_INIT() \
2165 .init_box = nhmex_uncore_msr_init_box, \
2166 .disable_box = nhmex_uncore_msr_disable_box, \
2167 .enable_box = nhmex_uncore_msr_enable_box, \
2168 .disable_event = nhmex_uncore_msr_disable_event, \
2169 .read_counter = uncore_msr_read_counter
2170
2171static struct intel_uncore_ops nhmex_uncore_ops = {
2172 NHMEX_UNCORE_OPS_COMMON_INIT(),
2173 .enable_event = nhmex_uncore_msr_enable_event,
2174};
2175
2176static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
2177 &format_attr_event.attr,
2178 &format_attr_edge.attr,
2179 NULL,
2180};
2181
2182static struct attribute_group nhmex_uncore_ubox_format_group = {
2183 .name = "format",
2184 .attrs = nhmex_uncore_ubox_formats_attr,
2185};
2186
2187static struct intel_uncore_type nhmex_uncore_ubox = {
2188 .name = "ubox",
2189 .num_counters = 1,
2190 .num_boxes = 1,
2191 .perf_ctr_bits = 48,
2192 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
2193 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
2194 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
2195 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
2196 .ops = &nhmex_uncore_ops,
2197 .format_group = &nhmex_uncore_ubox_format_group
2198};
2199
2200static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
2201 &format_attr_event.attr,
2202 &format_attr_umask.attr,
2203 &format_attr_edge.attr,
2204 &format_attr_inv.attr,
2205 &format_attr_thresh8.attr,
2206 NULL,
2207};
2208
2209static struct attribute_group nhmex_uncore_cbox_format_group = {
2210 .name = "format",
2211 .attrs = nhmex_uncore_cbox_formats_attr,
2212};
2213
2214/* msr offset for each instance of cbox */
2215static unsigned nhmex_cbox_msr_offsets[] = {
2216 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
2217};
2218
2219static struct intel_uncore_type nhmex_uncore_cbox = {
2220 .name = "cbox",
2221 .num_counters = 6,
cb37af77 2222 .num_boxes = 10,
2223 .perf_ctr_bits = 48,
2224 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
2225 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
2226 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2227 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
cb37af77 2228 .msr_offsets = nhmex_cbox_msr_offsets,
2229 .pair_ctr_ctl = 1,
2230 .ops = &nhmex_uncore_ops,
2231 .format_group = &nhmex_uncore_cbox_format_group
2232};
2233
2234static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
2235 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
2236 { /* end: all zeroes */ },
2237};
2238
2239static struct intel_uncore_type nhmex_uncore_wbox = {
2240 .name = "wbox",
2241 .num_counters = 4,
2242 .num_boxes = 1,
2243 .perf_ctr_bits = 48,
2244 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
2245 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
2246 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
2247 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
2248 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2249 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
2250 .pair_ctr_ctl = 1,
2251 .event_descs = nhmex_uncore_wbox_events,
2252 .ops = &nhmex_uncore_ops,
2253 .format_group = &nhmex_uncore_cbox_format_group
2254};
2255
2256static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2257{
2258 struct hw_perf_event *hwc = &event->hw;
2259 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2260 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2261 int ctr, ev_sel;
2262
2263 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
2264 NHMEX_B_PMON_CTR_SHIFT;
2265 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
2266 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
2267
2268 /* events that do not use the match/mask registers */
2269 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
2270 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
2271 return 0;
2272
2273 if (box->pmu->pmu_idx == 0)
2274 reg1->reg = NHMEX_B0_MSR_MATCH;
2275 else
2276 reg1->reg = NHMEX_B1_MSR_MATCH;
2277 reg1->idx = 0;
2278 reg1->config = event->attr.config1;
2279 reg2->config = event->attr.config2;
2280 return 0;
2281}
2282
2283static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2284{
2285 struct hw_perf_event *hwc = &event->hw;
2286 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2287 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2288
2289 if (reg1->idx != EXTRA_REG_NONE) {
2290 wrmsrl(reg1->reg, reg1->config);
2291 wrmsrl(reg1->reg + 1, reg2->config);
2292 }
2293 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2294 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
2295}
2296
2297/*
2298 * The Bbox has 4 counters, but each counter monitors different events.
2299 * Use bits 6-7 in the event config to select counter.
2300 */
2301static struct event_constraint nhmex_uncore_bbox_constraints[] = {
2302 EVENT_CONSTRAINT(0 , 1, 0xc0),
2303 EVENT_CONSTRAINT(0x40, 2, 0xc0),
2304 EVENT_CONSTRAINT(0x80, 4, 0xc0),
2305 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
2306 EVENT_CONSTRAINT_END,
2307};
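/*
 * Worked example: cmask 0xc0 compares only bits 6-7 of the event
 * code, so an event coded 0x40 (counter field == 1) matches the
 * second constraint and may only run on counter 1 (idxmsk 0x2).
 */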
2308
2309static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
2310 &format_attr_event5.attr,
2311 &format_attr_counter.attr,
2312 &format_attr_match.attr,
2313 &format_attr_mask.attr,
2314 NULL,
2315};
2316
2317static struct attribute_group nhmex_uncore_bbox_format_group = {
2318 .name = "format",
2319 .attrs = nhmex_uncore_bbox_formats_attr,
2320};
2321
2322static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
2323 NHMEX_UNCORE_OPS_COMMON_INIT(),
2324 .enable_event = nhmex_bbox_msr_enable_event,
2325 .hw_config = nhmex_bbox_hw_config,
2326 .get_constraint = uncore_get_constraint,
2327 .put_constraint = uncore_put_constraint,
2328};
2329
2330static struct intel_uncore_type nhmex_uncore_bbox = {
2331 .name = "bbox",
2332 .num_counters = 4,
2333 .num_boxes = 2,
2334 .perf_ctr_bits = 48,
2335 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
2336 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
2337 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
2338 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
2339 .msr_offset = NHMEX_B_MSR_OFFSET,
2340 .pair_ctr_ctl = 1,
2341 .num_shared_regs = 1,
2342 .constraints = nhmex_uncore_bbox_constraints,
2343 .ops = &nhmex_uncore_bbox_ops,
2344 .format_group = &nhmex_uncore_bbox_format_group
2345};
2346
2347static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2348{
2349 struct hw_perf_event *hwc = &event->hw;
2350 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2351 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
254298c7 2352
2353 /* only TO_R_PROG_EV event uses the match/mask register */
2354 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
2355 NHMEX_S_EVENT_TO_R_PROG_EV)
2356 return 0;
2357
2358 if (box->pmu->pmu_idx == 0)
2359 reg1->reg = NHMEX_S0_MSR_MM_CFG;
2360 else
2361 reg1->reg = NHMEX_S1_MSR_MM_CFG;
254298c7 2362 reg1->idx = 0;
2363 reg1->config = event->attr.config1;
2364 reg2->config = event->attr.config2;
2365 return 0;
2366}
2367
2368static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2369{
2370 struct hw_perf_event *hwc = &event->hw;
2371 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2372 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2373
2374 if (reg1->idx != EXTRA_REG_NONE) {
2375 wrmsrl(reg1->reg, 0);
2376 wrmsrl(reg1->reg + 1, reg1->config);
2377 wrmsrl(reg1->reg + 2, reg2->config);
2378 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
2379 }
2380 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2381}
2382
2383static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2384 &format_attr_event.attr,
2385 &format_attr_umask.attr,
2386 &format_attr_edge.attr,
2387 &format_attr_inv.attr,
2388 &format_attr_thresh8.attr,
2389 &format_attr_match.attr,
2390 &format_attr_mask.attr,
2391 NULL,
2392};
2393
2394static struct attribute_group nhmex_uncore_sbox_format_group = {
2395 .name = "format",
2396 .attrs = nhmex_uncore_sbox_formats_attr,
2397};
2398
2399static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2400 NHMEX_UNCORE_OPS_COMMON_INIT(),
2401 .enable_event = nhmex_sbox_msr_enable_event,
2402 .hw_config = nhmex_sbox_hw_config,
2403 .get_constraint = uncore_get_constraint,
2404 .put_constraint = uncore_put_constraint,
2405};
2406
2407static struct intel_uncore_type nhmex_uncore_sbox = {
2408 .name = "sbox",
2409 .num_counters = 4,
2410 .num_boxes = 2,
2411 .perf_ctr_bits = 48,
2412 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
2413 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
2414 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2415 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2416 .msr_offset = NHMEX_S_MSR_OFFSET,
2417 .pair_ctr_ctl = 1,
2418 .num_shared_regs = 1,
2419 .ops = &nhmex_uncore_sbox_ops,
2420 .format_group = &nhmex_uncore_sbox_format_group
2421};
2422
2423enum {
2424 EXTRA_REG_NHMEX_M_FILTER,
2425 EXTRA_REG_NHMEX_M_DSP,
2426 EXTRA_REG_NHMEX_M_ISS,
2427 EXTRA_REG_NHMEX_M_MAP,
2428 EXTRA_REG_NHMEX_M_MSC_THR,
2429 EXTRA_REG_NHMEX_M_PGT,
2430 EXTRA_REG_NHMEX_M_PLD,
2431 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2432};
2433
2434static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2435 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2436 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2437 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2438 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2439 /* event 0xa uses two extra registers */
2440 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2441 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2442 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2443 /* events 0xd ~ 0x10 use the same extra register */
2444 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2445 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2446 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2447 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2448 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2449 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2450 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2451 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2452 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2453 EVENT_EXTRA_END
2454};
2455
cb37af77 2456/* Nehalem-EX or Westmere-EX? */
46bdd905 2457static bool uncore_nhmex;
cb37af77 2458
2459static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2460{
2461 struct intel_uncore_extra_reg *er;
2462 unsigned long flags;
2463 bool ret = false;
2464 u64 mask;
2465
2466 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2467 er = &box->shared_regs[idx];
2468 raw_spin_lock_irqsave(&er->lock, flags);
2469 if (!atomic_read(&er->ref) || er->config == config) {
2470 atomic_inc(&er->ref);
2471 er->config = config;
2472 ret = true;
2473 }
2474 raw_spin_unlock_irqrestore(&er->lock, flags);
2475
2476 return ret;
2477 }
2478 /*
2479 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2480 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2481 * fields which are shared.
2482 */
2483 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2484 if (WARN_ON_ONCE(idx >= 4))
2485 return false;
2486
2487 /* mask of the shared fields */
2488 if (uncore_nhmex)
2489 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2490 else
2491 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2492 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2493
2494 raw_spin_lock_irqsave(&er->lock, flags);
2495 /* add mask of the non-shared field if it's in use */
2496 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2497 if (uncore_nhmex)
2498 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2499 else
2500 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2501 }
2502
2503 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2504 atomic_add(1 << (idx * 8), &er->ref);
2505 if (uncore_nhmex)
2506 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2507 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2508 else
2509 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2510 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2511 er->config &= ~mask;
2512 er->config |= (config & mask);
2513 ret = true;
2514 }
2515 raw_spin_unlock_irqrestore(&er->lock, flags);
2516
2517 return ret;
2518}
2519
2520static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2521{
2522 struct intel_uncore_extra_reg *er;
2523
2524 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2525 er = &box->shared_regs[idx];
2526 atomic_dec(&er->ref);
2527 return;
2528 }
2529
2530 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2531 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2532 atomic_sub(1 << (idx * 8), &er->ref);
2533}
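/*
 * For ZDP_CTL_FVC, er->ref serves as four packed 8-bit reference
 * counts, one per field: atomic_add/atomic_sub with 1 << (idx * 8)
 * touch only the slice for field idx, and __BITS_VALUE() extracts it.
 */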
2534
46bdd905 2535static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2536{
2537 struct hw_perf_event *hwc = &event->hw;
2538 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
13acac30 2539 u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2540 u64 config = reg1->config;
2541
2542 /* get the non-shared control bits and shift them */
2543 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2544 if (uncore_nhmex)
2545 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2546 else
2547 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2548 if (new_idx > orig_idx) {
2549 idx = new_idx - orig_idx;
2550 config <<= 3 * idx;
2551 } else {
2552 idx = orig_idx - new_idx;
2553 config >>= 3 * idx;
2554 }
2555
2556 /* add the shared control bits back */
2557 if (uncore_nhmex)
2558 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2559 else
2560 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2562 if (modify) {
2563 /* adjust the main event selector */
2564 if (new_idx > orig_idx)
2565 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2566 else
2567 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2568 reg1->config = config;
2569 reg1->idx = ~0xff | new_idx;
2570 }
2571 return config;
2572}
2573
2574static struct event_constraint *
2575nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2576{
2577 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2578 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2579 int i, idx[2], alloc = 0;
2580 u64 config1 = reg1->config;
2581
2582 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2583 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
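	/*
	 * reg1->idx packs up to two 8-bit extra-register indices;
	 * 0xff in a slot means that slot is unused.
	 */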
2584again:
2585 for (i = 0; i < 2; i++) {
2586 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2587 idx[i] = 0xff;
2588
2589 if (idx[i] == 0xff)
2590 continue;
2591
2592 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2593 __BITS_VALUE(config1, i, 32)))
2594 goto fail;
2595 alloc |= (0x1 << i);
2596 }
2597
2598 /* for the match/mask registers */
2599 if (reg2->idx != EXTRA_REG_NONE &&
2600 (uncore_box_is_fake(box) || !reg2->alloc) &&
2601 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2602 goto fail;
2603
2604 /*
2605 * If it's a fake box -- as per validate_{group,event}() we
2606 * shouldn't touch event state and we can avoid doing so
2607 * since both will only call get_event_constraints() once
2608 * on each event, this avoids the need for reg->alloc.
2609 */
2610 if (!uncore_box_is_fake(box)) {
2611 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2612 nhmex_mbox_alter_er(event, idx[0], true);
2613 reg1->alloc |= alloc;
2614 if (reg2->idx != EXTRA_REG_NONE)
2615 reg2->alloc = 1;
2616 }
2617 return NULL;
2618fail:
2619 if (idx[0] != 0xff && !(alloc & 0x1) &&
2620 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2621 /*
2622 * events 0xd ~ 0x10 are functionally identical, but are
2623 * controlled by different fields in the ZDP_CTL_FVC
2624 * register. If we failed to take one field, try the
2625 * remaining 3 choices.
7c94ee2e 2626 */
2627 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2628 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2629 idx[0] = (idx[0] + 1) % 4;
2630 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2631 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2632 config1 = nhmex_mbox_alter_er(event, idx[0], false);
2633 goto again;
7c94ee2e 2634 }
254298c7 2635 }
7c94ee2e 2636
2637 if (alloc & 0x1)
2638 nhmex_mbox_put_shared_reg(box, idx[0]);
2639 if (alloc & 0x2)
2640 nhmex_mbox_put_shared_reg(box, idx[1]);
2641 return &constraint_empty;
2642}
fcde10e9 2643
254298c7 2644static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2645{
2646 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2647 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
fcde10e9 2648
2649 if (uncore_box_is_fake(box))
2650 return;
2651
2652 if (reg1->alloc & 0x1)
2653 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2654 if (reg1->alloc & 0x2)
2655 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2656 reg1->alloc = 0;
2657
2658 if (reg2->alloc) {
2659 nhmex_mbox_put_shared_reg(box, reg2->idx);
2660 reg2->alloc = 0;
2661 }
2662}
2663
254298c7 2664static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
fcde10e9 2665{
2666 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2667 return er->idx;
2668 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2669}
2670
254298c7 2671static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2672{
2673 struct intel_uncore_type *type = box->pmu->type;
2674 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2675 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2676 struct extra_reg *er;
2677 unsigned msr;
2678 int reg_idx = 0;
2679 /*
2680 * The mbox events may require at most 2 extra MSRs. But only
2681 * the lower 32 bits in these MSRs are significant, so we can use
2682 * config1 to pass two MSRs' config.
2683 */
2684 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2685 if (er->event != (event->hw.config & er->config_mask))
2686 continue;
2687 if (event->attr.config1 & ~er->valid_mask)
2688 return -EINVAL;
2689
2690 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2691 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2692 return -EINVAL;
2693
2694 /* always use the 32~63 bits to pass the PLD config */
2695 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2696 reg_idx = 1;
2697 else if (WARN_ON_ONCE(reg_idx > 0))
2698 return -EINVAL;
2699
2700 reg1->idx &= ~(0xff << (reg_idx * 8));
2701 reg1->reg &= ~(0xffff << (reg_idx * 16));
2702 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2703 reg1->reg |= msr << (reg_idx * 16);
2704 reg1->config = event->attr.config1;
2705 reg_idx++;
2706 }
2707 /*
2708 * The mbox only provides the ability to perform address matching
2709 * for the PLD events.
2710 */
2711 if (reg_idx == 2) {
2712 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2713 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2714 reg2->config = event->attr.config2;
2715 else
2716 reg2->config = ~0ULL;
2717 if (box->pmu->pmu_idx == 0)
2718 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2719 else
2720 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2721 }
254298c7 2722 return 0;
2723}
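/*
 * To recap the packing used above: reg1->reg carries two 16-bit MSR
 * addresses, reg1->idx two 8-bit extra-register indices, and config1
 * two 32-bit MSR values, with the PLD config always in the upper
 * halves (reg_idx 1).
 */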
2724
254298c7 2725static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
fcde10e9 2726{
2727 struct intel_uncore_extra_reg *er;
2728 unsigned long flags;
2729 u64 config;
2730
2731 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2732 return box->shared_regs[idx].config;
2733
2734 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2735 raw_spin_lock_irqsave(&er->lock, flags);
2736 config = er->config;
2737 raw_spin_unlock_irqrestore(&er->lock, flags);
2738 return config;
2739}
2740
2741static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2742{
2743 struct hw_perf_event *hwc = &event->hw;
2744 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2745 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2746 int idx;
2747
2748 idx = __BITS_VALUE(reg1->idx, 0, 8);
2749 if (idx != 0xff)
2750 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2751 nhmex_mbox_shared_reg_config(box, idx));
2752 idx = __BITS_VALUE(reg1->idx, 1, 8);
2753 if (idx != 0xff)
2754 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2755 nhmex_mbox_shared_reg_config(box, idx));
2756
2757 if (reg2->idx != EXTRA_REG_NONE) {
2758 wrmsrl(reg2->reg, 0);
2759 if (reg2->config != ~0ULL) {
2760 wrmsrl(reg2->reg + 1,
2761 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2762 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2763 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2764 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2765 }
fcde10e9 2766 }
2767
2768 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2769}
2770
2771DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
2772DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
2773DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
2774DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
2775DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
2776DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
2777DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
2778DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
2779DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
2780DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
2781DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
2782DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
2783DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
2784DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
2785DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
2786DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
2787
2788static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2789 &format_attr_count_mode.attr,
2790 &format_attr_storage_mode.attr,
2791 &format_attr_wrap_mode.attr,
2792 &format_attr_flag_mode.attr,
2793 &format_attr_inc_sel.attr,
2794 &format_attr_set_flag_sel.attr,
ebb6cc03 2795 &format_attr_filter_cfg_en.attr,
2796 &format_attr_filter_match.attr,
2797 &format_attr_filter_mask.attr,
2798 &format_attr_dsp.attr,
2799 &format_attr_thr.attr,
2800 &format_attr_fvc.attr,
2801 &format_attr_pgt.attr,
2802 &format_attr_map.attr,
2803 &format_attr_iss.attr,
2804 &format_attr_pld.attr,
2805 NULL,
2806};
2807
2808static struct attribute_group nhmex_uncore_mbox_format_group = {
2809 .name = "format",
2810 .attrs = nhmex_uncore_mbox_formats_attr,
2811};
2812
2813static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2814 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2815 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2816 { /* end: all zeroes */ },
2817};
2818
2819static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2820 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2821 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2822 { /* end: all zeroes */ },
2823};
2824
2825static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2826 NHMEX_UNCORE_OPS_COMMON_INIT(),
2827 .enable_event = nhmex_mbox_msr_enable_event,
2828 .hw_config = nhmex_mbox_hw_config,
2829 .get_constraint = nhmex_mbox_get_constraint,
2830 .put_constraint = nhmex_mbox_put_constraint,
2831};
2832
2833static struct intel_uncore_type nhmex_uncore_mbox = {
2834 .name = "mbox",
2835 .num_counters = 6,
2836 .num_boxes = 2,
2837 .perf_ctr_bits = 48,
2838 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
2839 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
2840 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
2841 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
2842 .msr_offset = NHMEX_M_MSR_OFFSET,
2843 .pair_ctr_ctl = 1,
2844 .num_shared_regs = 8,
2845 .event_descs = nhmex_uncore_mbox_events,
2846 .ops = &nhmex_uncore_mbox_ops,
2847 .format_group = &nhmex_uncore_mbox_format_group,
2848};
2849
46bdd905 2850static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2851{
2852 struct hw_perf_event *hwc = &event->hw;
2853 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
fcde10e9 2854
ebb6cc03 2855 /* adjust the main event selector and extra register index */
2856 if (reg1->idx % 2) {
2857 reg1->idx--;
2858 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2859 } else {
2860 reg1->idx++;
2861 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2862 }
2863
ebb6cc03 2864 /* adjust extra register config */
254298c7 2865 switch (reg1->idx % 6) {
254298c7 2866 case 2:
ebb6cc03 2867 /* shift the 8~15 bits to the 0~7 bits */
2868 reg1->config >>= 8;
2869 break;
2870 case 3:
ebb6cc03 2871 /* shift the 0~7 bits to the 8~15 bits */
2872 reg1->config <<= 8;
2873 break;
254298c7
YZ
2874 }
2875}
2876
2877/*
2878 * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
2879 * An event set consists of 6 events; the 3rd and 4th events in
2880 * an event set use the same extra register, so an event set uses
2881 * 5 extra registers.
2882 */
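/*
 * Within a set the mapping is therefore: events 0, 1, 2 use extra
 * registers 0, 1, 2; event 3 shares register 2; events 4 and 5 use
 * registers 3 and 4. Adding (idx / 6) * 5 selects the per-set block.
 */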
2883static struct event_constraint *
2884nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2885{
2886 struct hw_perf_event *hwc = &event->hw;
2887 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2888 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2889 struct intel_uncore_extra_reg *er;
2890 unsigned long flags;
2891 int idx, er_idx;
2892 u64 config1;
2893 bool ok = false;
2894
2895 if (!uncore_box_is_fake(box) && reg1->alloc)
2896 return NULL;
2897
2898 idx = reg1->idx % 6;
2899 config1 = reg1->config;
2900again:
2901 er_idx = idx;
2902 /* the 3rd and 4th events use the same extra register */
2903 if (er_idx > 2)
2904 er_idx--;
2905 er_idx += (reg1->idx / 6) * 5;
2906
2907 er = &box->shared_regs[er_idx];
2908 raw_spin_lock_irqsave(&er->lock, flags);
2909 if (idx < 2) {
2910 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2911 atomic_inc(&er->ref);
2912 er->config = reg1->config;
2913 ok = true;
2914 }
2915 } else if (idx == 2 || idx == 3) {
2916 /*
2917 * these two events use different fields in an extra register,
2918 * the 0~7 bits and the 8~15 bits respectively.
2919 */
2920 u64 mask = 0xff << ((idx - 2) * 8);
2921 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2922 !((er->config ^ config1) & mask)) {
2923 atomic_add(1 << ((idx - 2) * 8), &er->ref);
2924 er->config &= ~mask;
2925 er->config |= config1 & mask;
2926 ok = true;
2927 }
2928 } else {
2929 if (!atomic_read(&er->ref) ||
2930 (er->config == (hwc->config >> 32) &&
2931 er->config1 == reg1->config &&
2932 er->config2 == reg2->config)) {
2933 atomic_inc(&er->ref);
2934 er->config = (hwc->config >> 32);
2935 er->config1 = reg1->config;
2936 er->config2 = reg2->config;
2937 ok = true;
2938 }
2939 }
2940 raw_spin_unlock_irqrestore(&er->lock, flags);
2941
2942 if (!ok) {
2943 /*
2944 * The Rbox events are always in pairs. The paired
2945 * events are functionally identical, but use different
2946 * extra registers. If we failed to take an extra
2947 * register, try the alternative.
2948 */
2949 if (idx % 2)
2950 idx--;
2951 else
2952 idx++;
2953 if (idx != reg1->idx % 6) {
2954 if (idx == 2)
2955 config1 >>= 8;
2956 else if (idx == 3)
2957 config1 <<= 8;
2958 goto again;
2959 }
2960 } else {
2961 if (!uncore_box_is_fake(box)) {
2962 if (idx != reg1->idx % 6)
2963 nhmex_rbox_alter_er(box, event);
2964 reg1->alloc = 1;
2965 }
2966 return NULL;
2967 }
2968 return &constraint_empty;
2969}
2970
254298c7 2971static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2972{
2973 struct intel_uncore_extra_reg *er;
2974 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2975 int idx, er_idx;
2976
2977 if (uncore_box_is_fake(box) || !reg1->alloc)
2978 return;
2979
2980 idx = reg1->idx % 6;
2981 er_idx = idx;
2982 if (er_idx > 2)
2983 er_idx--;
2984 er_idx += (reg1->idx / 6) * 5;
2985
2986 er = &box->shared_regs[er_idx];
2987 if (idx == 2 || idx == 3)
2988 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2989 else
2990 atomic_dec(&er->ref);
2991
2992 reg1->alloc = 0;
2993}
2994
254298c7 2995static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2996{
2997 struct hw_perf_event *hwc = &event->hw;
2998 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2999 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
ebb6cc03 3000 int idx;
fcde10e9 3001
3002 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
3003 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
3004 if (idx >= 0x18)
3005 return -EINVAL;
3006
3007 reg1->idx = idx;
3008 reg1->config = event->attr.config1;
3009
ebb6cc03 3010 switch (idx % 6) {
3011 case 4:
3012 case 5:
254298c7 3013 hwc->config |= event->attr.config & (~0ULL << 32);
ebb6cc03 3014 reg2->config = event->attr.config2;
3015 break;
3016 }
3017 return 0;
3018}
3019
3020static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
3021{
3022 struct hw_perf_event *hwc = &event->hw;
3023 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
3024 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
ebb6cc03 3025 int idx, port;
254298c7 3026
3027 idx = reg1->idx;
3028 port = idx / 6 + box->pmu->pmu_idx * 4;
254298c7 3029
ebb6cc03 3030 switch (idx % 6) {
254298c7 3031 case 0:
3032 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
3033 break;
254298c7 3034 case 1:
ebb6cc03 3035 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
3036 break;
3037 case 2:
3038 case 3:
ebb6cc03 3039 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
46bdd905 3040 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
254298c7
YZ
3041 break;
3042 case 4:
3043 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
3044 hwc->config >> 32);
3045 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
3046 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
3047 break;
254298c7 3048 case 5:
3049 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
3050 hwc->config >> 32);
3051 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
3052 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
3053 break;
3054 }
3055
3056 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
3057 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
3058}
3059
3060DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
3061DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
3062DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
3063DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
3064DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
3065
3066static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
3067 &format_attr_event5.attr,
3068 &format_attr_xbr_mm_cfg.attr,
3069 &format_attr_xbr_match.attr,
3070 &format_attr_xbr_mask.attr,
3071 &format_attr_qlx_cfg.attr,
3072 &format_attr_iperf_cfg.attr,
3073 NULL,
3074};
3075
254298c7 3076static struct attribute_group nhmex_uncore_rbox_format_group = {
fcde10e9 3077 .name = "format",
254298c7 3078 .attrs = nhmex_uncore_rbox_formats_attr,
3079};
3080
3081static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
3082 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
3083 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
3084 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
3085 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
3086 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
3087 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
3088 { /* end: all zeroes */ },
3089};
3090
3091static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
3092 NHMEX_UNCORE_OPS_COMMON_INIT(),
3093 .enable_event = nhmex_rbox_msr_enable_event,
3094 .hw_config = nhmex_rbox_hw_config,
3095 .get_constraint = nhmex_rbox_get_constraint,
3096 .put_constraint = nhmex_rbox_put_constraint,
3097};
3098
3099static struct intel_uncore_type nhmex_uncore_rbox = {
3100 .name = "rbox",
3101 .num_counters = 8,
3102 .num_boxes = 2,
3103 .perf_ctr_bits = 48,
3104 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
3105 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
3106 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
3107 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
3108 .msr_offset = NHMEX_R_MSR_OFFSET,
3109 .pair_ctr_ctl = 1,
3110 .num_shared_regs = 20,
3111 .event_descs = nhmex_uncore_rbox_events,
3112 .ops = &nhmex_uncore_rbox_ops,
3113 .format_group = &nhmex_uncore_rbox_format_group
3114};
3115
3116static struct intel_uncore_type *nhmex_msr_uncores[] = {
3117 &nhmex_uncore_ubox,
3118 &nhmex_uncore_cbox,
3119 &nhmex_uncore_bbox,
3120 &nhmex_uncore_sbox,
3121 &nhmex_uncore_mbox,
3122 &nhmex_uncore_rbox,
3123 &nhmex_uncore_wbox,
3124 NULL,
3125};
254298c7 3126/* end of Nehalem-EX uncore support */
fcde10e9 3127
254298c7 3128static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
3129{
3130 struct hw_perf_event *hwc = &event->hw;
3131
3132 hwc->idx = idx;
3133 hwc->last_tag = ++box->tags[idx];
3134
3135 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
3136 hwc->event_base = uncore_fixed_ctr(box);
3137 hwc->config_base = uncore_fixed_ctl(box);
3138 return;
3139 }
3140
3141 hwc->config_base = uncore_event_ctl(box, hwc->idx);
3142 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
3143}
3144
254298c7 3145static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
3146{
3147 u64 prev_count, new_count, delta;
3148 int shift;
3149
3150 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
3151 shift = 64 - uncore_fixed_ctr_bits(box);
3152 else
3153 shift = 64 - uncore_perf_ctr_bits(box);
3154
3155 /* the hrtimer might modify the previous event value */
3156again:
3157 prev_count = local64_read(&event->hw.prev_count);
3158 new_count = uncore_read_counter(box, event);
3159 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
3160 goto again;
3161
3162 delta = (new_count << shift) - (prev_count << shift);
3163 delta >>= shift;
3164
3165 local64_add(delta, &event->count);
3166}
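/*
 * Shifting both values up by (64 - width) makes the 64-bit subtraction
 * wrap exactly at the counter width; shifting back down yields the
 * true delta. E.g. with 48-bit counters (shift = 16), prev =
 * 0xffffffffffff and new = 0x5 give ((new << 16) - (prev << 16)) >> 16
 * = 6, the correct delta across the 2^48 wrap.
 */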
3167
3168/*
3169 * The overflow interrupt is unavailable for SandyBridge-EP and broken
3170 * for SandyBridge, so we use an hrtimer to periodically poll the
3171 * counters to avoid overflow.
3172 */
3173static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
3174{
3175 struct intel_uncore_box *box;
ced2efb0 3176 struct perf_event *event;
3177 unsigned long flags;
3178 int bit;
3179
3180 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
3181 if (!box->n_active || box->cpu != smp_processor_id())
3182 return HRTIMER_NORESTART;
3183 /*
3184 * disable local interrupts to prevent uncore_pmu_event_start/stop
3185 * from interrupting the update process
3186 */
3187 local_irq_save(flags);
3188
3189 /*
3190 * handle boxes with an active event list as opposed to active
3191 * counters
3192 */
3193 list_for_each_entry(event, &box->active_list, active_entry) {
3194 uncore_perf_event_update(box, event);
3195 }
3196
3197 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
3198 uncore_perf_event_update(box, box->events[bit]);
3199
3200 local_irq_restore(flags);
3201
79859cce 3202 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
3203 return HRTIMER_RESTART;
3204}
3205
3206static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
3207{
3208 __hrtimer_start_range_ns(&box->hrtimer,
79859cce 3209 ns_to_ktime(box->hrtimer_duration), 0,
3210 HRTIMER_MODE_REL_PINNED, 0);
3211}
3212
3213static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
3214{
3215 hrtimer_cancel(&box->hrtimer);
3216}
3217
3218static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
3219{
3220 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3221 box->hrtimer.function = uncore_pmu_hrtimer;
3222}
3223
73c4427c 3224static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
3225{
3226 struct intel_uncore_box *box;
6a67943a 3227 int i, size;
087bfbb0 3228
254298c7 3229 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
6a67943a 3230
73c4427c 3231 box = kzalloc_node(size, GFP_KERNEL, node);
3232 if (!box)
3233 return NULL;
3234
3235 for (i = 0; i < type->num_shared_regs; i++)
3236 raw_spin_lock_init(&box->shared_regs[i].lock);
3237
3238 uncore_pmu_init_hrtimer(box);
3239 atomic_set(&box->refcnt, 1);
3240 box->cpu = -1;
3241 box->phys_id = -1;
3242
3243 /* set default hrtimer timeout */
3244 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
087bfbb0 3245
ced2efb0 3246 INIT_LIST_HEAD(&box->active_list);
14371cce 3247
087bfbb0 3248 return box;
3249}
3250
3251static int
3252uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
3253{
3254 struct perf_event *event;
3255 int n, max_count;
3256
3257 max_count = box->pmu->type->num_counters;
3258 if (box->pmu->type->fixed_ctl)
3259 max_count++;
3260
3261 if (box->n_events >= max_count)
3262 return -EINVAL;
3263
3264 n = box->n_events;
3265 box->event_list[n] = leader;
3266 n++;
3267 if (!dogrp)
3268 return n;
3269
3270 list_for_each_entry(event, &leader->sibling_list, group_entry) {
3271 if (event->state <= PERF_EVENT_STATE_OFF)
3272 continue;
3273
3274 if (n >= max_count)
3275 return -EINVAL;
3276
3277 box->event_list[n] = event;
3278 n++;
3279 }
3280 return n;
3281}
3282
3283static struct event_constraint *
254298c7 3284uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 3285{
6a67943a 3286 struct intel_uncore_type *type = box->pmu->type;
3287 struct event_constraint *c;
3288
3289 if (type->ops->get_constraint) {
3290 c = type->ops->get_constraint(box, event);
3291 if (c)
3292 return c;
3293 }
3294
dbc33f70 3295 if (event->attr.config == UNCORE_FIXED_EVENT)
3296 return &constraint_fixed;
3297
3298 if (type->constraints) {
3299 for_each_event_constraint(c, type->constraints) {
3300 if ((event->hw.config & c->cmask) == c->code)
3301 return c;
3302 }
3303 }
3304
3305 return &type->unconstrainted;
3306}
3307
254298c7 3308static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3309{
3310 if (box->pmu->type->ops->put_constraint)
3311 box->pmu->type->ops->put_constraint(box, event);
3312}
3313
254298c7 3314static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
3315{
3316 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
43b45780 3317 struct event_constraint *c;
6a67943a 3318 int i, wmin, wmax, ret = 0;
3319 struct hw_perf_event *hwc;
3320
3321 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
3322
3323 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
43b45780 3324 hwc = &box->event_list[i]->hw;
6a67943a 3325 c = uncore_get_event_constraint(box, box->event_list[i]);
43b45780 3326 hwc->constraint = c;
3327 wmin = min(wmin, c->weight);
3328 wmax = max(wmax, c->weight);
3329 }
3330
3331 /* fastpath, try to reuse previous register */
3332 for (i = 0; i < n; i++) {
3333 hwc = &box->event_list[i]->hw;
43b45780 3334 c = hwc->constraint;
3335
3336 /* never assigned */
3337 if (hwc->idx == -1)
3338 break;
3339
3340 /* constraint still honored */
3341 if (!test_bit(hwc->idx, c->idxmsk))
3342 break;
3343
3344 /* not already used */
3345 if (test_bit(hwc->idx, used_mask))
3346 break;
3347
3348 __set_bit(hwc->idx, used_mask);
3349 if (assign)
3350 assign[i] = hwc->idx;
087bfbb0 3351 }
087bfbb0 3352 /* slow path */
6a67943a 3353 if (i != n)
3354 ret = perf_assign_events(box->event_list, n,
3355 wmin, wmax, assign);
3356
3357 if (!assign || ret) {
3358 for (i = 0; i < n; i++)
3359 uncore_put_event_constraint(box, box->event_list[i]);
3360 }
3361 return ret ? -EINVAL : 0;
3362}
3363
3364static void uncore_pmu_event_start(struct perf_event *event, int flags)
3365{
3366 struct intel_uncore_box *box = uncore_event_to_box(event);
3367 int idx = event->hw.idx;
3368
3369 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3370 return;
3371
3372 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3373 return;
3374
3375 event->hw.state = 0;
3376 box->events[idx] = event;
3377 box->n_active++;
3378 __set_bit(idx, box->active_mask);
3379
3380 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3381 uncore_enable_event(box, event);
3382
3383 if (box->n_active == 1) {
3384 uncore_enable_box(box);
3385 uncore_pmu_start_hrtimer(box);
3386 }
3387}
3388
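/*
 * The hrtimer started with the first active event takes the place of an
 * overflow interrupt: this driver never arms a PMI for uncore counters,
 * so each box is polled every box->hrtimer_duration to fold the
 * hardware deltas into prev_count before a counter can wrap.
 */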
3389static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3390{
3391 struct intel_uncore_box *box = uncore_event_to_box(event);
3392 struct hw_perf_event *hwc = &event->hw;
3393
3394 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3395 uncore_disable_event(box, event);
3396 box->n_active--;
3397 box->events[hwc->idx] = NULL;
3398 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3399 hwc->state |= PERF_HES_STOPPED;
3400
3401 if (box->n_active == 0) {
3402 uncore_disable_box(box);
3403 uncore_pmu_cancel_hrtimer(box);
3404 }
3405 }
3406
3407 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3408 /*
3409	 * Drain the remaining delta count out of an event
3410 * that we are disabling:
3411 */
3412 uncore_perf_event_update(box, event);
3413 hwc->state |= PERF_HES_UPTODATE;
3414 }
3415}
3416
3417static int uncore_pmu_event_add(struct perf_event *event, int flags)
3418{
3419 struct intel_uncore_box *box = uncore_event_to_box(event);
3420 struct hw_perf_event *hwc = &event->hw;
3421 int assign[UNCORE_PMC_IDX_MAX];
3422 int i, n, ret;
3423
3424 if (!box)
3425 return -ENODEV;
3426
3427 ret = n = uncore_collect_events(box, event, false);
3428 if (ret < 0)
3429 return ret;
3430
3431 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3432 if (!(flags & PERF_EF_START))
3433 hwc->state |= PERF_HES_ARCH;
3434
3435 ret = uncore_assign_events(box, assign, n);
3436 if (ret)
3437 return ret;
3438
3439 /* save events moving to new counters */
3440 for (i = 0; i < box->n_events; i++) {
3441 event = box->event_list[i];
3442 hwc = &event->hw;
3443
3444 if (hwc->idx == assign[i] &&
3445 hwc->last_tag == box->tags[assign[i]])
3446 continue;
3447 /*
3448 * Ensure we don't accidentally enable a stopped
3449 * counter simply because we rescheduled.
3450 */
3451 if (hwc->state & PERF_HES_STOPPED)
3452 hwc->state |= PERF_HES_ARCH;
3453
3454 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3455 }
3456
3457 /* reprogram moved events into new counters */
3458 for (i = 0; i < n; i++) {
3459 event = box->event_list[i];
3460 hwc = &event->hw;
3461
3462 if (hwc->idx != assign[i] ||
3463 hwc->last_tag != box->tags[assign[i]])
3464 uncore_assign_hw_event(box, event, assign[i]);
3465 else if (i < box->n_events)
3466 continue;
3467
3468 if (hwc->state & PERF_HES_ARCH)
3469 continue;
3470
3471 uncore_pmu_event_start(event, 0);
3472 }
3473 box->n_events = n;
3474
3475 return 0;
3476}
3477
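/*
 * add() reschedules the whole box: events whose counter or tag changed
 * are stopped (draining their counts), then restarted on the newly
 * assigned counter.  PERF_HES_ARCH marks events that were already
 * stopped before the reschedule, or added without PERF_EF_START, so
 * they are not accidentally re-enabled here.
 */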
3478static void uncore_pmu_event_del(struct perf_event *event, int flags)
3479{
3480 struct intel_uncore_box *box = uncore_event_to_box(event);
3481 int i;
3482
3483 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3484
3485 for (i = 0; i < box->n_events; i++) {
3486 if (event == box->event_list[i]) {
6a67943a
YZ
3487 uncore_put_event_constraint(box, event);
3488
087bfbb0
YZ
3489 while (++i < box->n_events)
3490 box->event_list[i - 1] = box->event_list[i];
3491
3492 --box->n_events;
3493 break;
3494 }
3495 }
3496
3497 event->hw.idx = -1;
3498 event->hw.last_tag = ~0ULL;
3499}
3500
3501static void uncore_pmu_event_read(struct perf_event *event)
3502{
3503 struct intel_uncore_box *box = uncore_event_to_box(event);
3504 uncore_perf_event_update(box, event);
3505}
3506
3507/*
3508 * validation ensures the group can be loaded onto the
3509 * PMU if it was the only group available.
3510 */
3511static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3512 struct perf_event *event)
3513{
3514 struct perf_event *leader = event->group_leader;
3515 struct intel_uncore_box *fake_box;
087bfbb0
YZ
3516 int ret = -EINVAL, n;
3517
73c4427c 3518 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
087bfbb0
YZ
3519 if (!fake_box)
3520 return -ENOMEM;
3521
3522 fake_box->pmu = pmu;
3523 /*
3524	 * the event is not yet connected with its
3525	 * siblings, therefore we must first collect
3526	 * existing siblings, then add the new event
3527	 * before we can simulate the scheduling
3528 */
3529 n = uncore_collect_events(fake_box, leader, true);
3530 if (n < 0)
3531 goto out;
3532
3533 fake_box->n_events = n;
3534 n = uncore_collect_events(fake_box, event, false);
3535 if (n < 0)
3536 goto out;
3537
3538 fake_box->n_events = n;
3539
6a67943a 3540 ret = uncore_assign_events(fake_box, NULL, n);
087bfbb0
YZ
3541out:
3542 kfree(fake_box);
3543 return ret;
3544}
3545
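/*
 * The fake box is a scratch copy used only to run the assignment
 * algorithm; no hardware is touched.  Passing assign == NULL to
 * uncore_assign_events() turns it into a pure feasibility check, and
 * any constraints taken are released within that same call.
 */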
46bdd905 3546static int uncore_pmu_event_init(struct perf_event *event)
087bfbb0
YZ
3547{
3548 struct intel_uncore_pmu *pmu;
3549 struct intel_uncore_box *box;
3550 struct hw_perf_event *hwc = &event->hw;
3551 int ret;
3552
3553 if (event->attr.type != event->pmu->type)
3554 return -ENOENT;
3555
3556 pmu = uncore_event_to_pmu(event);
3557 /* no device found for this pmu */
3558 if (pmu->func_id < 0)
3559 return -ENOENT;
3560
3561 /*
3562	 * The uncore PMU measures at all privilege levels all the time,
3563	 * so it doesn't make sense to specify any exclude bits.
3564 */
3565 if (event->attr.exclude_user || event->attr.exclude_kernel ||
3566 event->attr.exclude_hv || event->attr.exclude_idle)
3567 return -EINVAL;
3568
3569 /* Sampling not supported yet */
3570 if (hwc->sample_period)
3571 return -EINVAL;
3572
3573 /*
3574 * Place all uncore events for a particular physical package
3575 * onto a single cpu
3576 */
3577 if (event->cpu < 0)
3578 return -EINVAL;
3579 box = uncore_pmu_to_box(pmu, event->cpu);
3580 if (!box || box->cpu < 0)
3581 return -EINVAL;
3582 event->cpu = box->cpu;
3583
6a67943a
YZ
3584 event->hw.idx = -1;
3585 event->hw.last_tag = ~0ULL;
3586 event->hw.extra_reg.idx = EXTRA_REG_NONE;
ebb6cc03 3587 event->hw.branch_reg.idx = EXTRA_REG_NONE;
6a67943a 3588
087bfbb0
YZ
3589 if (event->attr.config == UNCORE_FIXED_EVENT) {
3590 /* no fixed counter */
3591 if (!pmu->type->fixed_ctl)
3592 return -EINVAL;
3593 /*
3594 * if there is only one fixed counter, only the first pmu
3595 * can access the fixed counter
3596 */
3597 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3598 return -EINVAL;
dbc33f70
SE
3599
3600 /* fixed counters have event field hardcoded to zero */
3601 hwc->config = 0ULL;
087bfbb0
YZ
3602 } else {
3603 hwc->config = event->attr.config & pmu->type->event_mask;
6a67943a
YZ
3604 if (pmu->type->ops->hw_config) {
3605 ret = pmu->type->ops->hw_config(box, event);
3606 if (ret)
3607 return ret;
3608 }
087bfbb0
YZ
3609 }
3610
087bfbb0
YZ
3611 if (event->group_leader != event)
3612 ret = uncore_validate_group(pmu, event);
3613 else
3614 ret = 0;
3615
3616 return ret;
3617}
3618
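/*
 * Usage sketch (illustrative, not part of this file): the checks above
 * mean an uncore event must be opened system-wide (pid == -1) on an
 * explicit CPU, with no exclude_* bits and no sample_period.  Assuming
 * the PMU's dynamic type id has been read from sysfs:
 *
 *	struct perf_event_attr attr = {
 *		.size   = sizeof(attr),
 *		.type   = type,		// from .../uncore_<name>/type
 *		.config = cfg,		// bits per the format/ directory
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 * The kernel silently retargets event->cpu to the package's designated
 * reader CPU (box->cpu).
 */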
314d9f63
YZ
3619static ssize_t uncore_get_attr_cpumask(struct device *dev,
3620 struct device_attribute *attr, char *buf)
3621{
3622 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3623
3624 buf[n++] = '\n';
3625 buf[n] = '\0';
3626 return n;
3627}
3628
3629static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3630
3631static struct attribute *uncore_pmu_attrs[] = {
3632 &dev_attr_cpumask.attr,
3633 NULL,
3634};
3635
3636static struct attribute_group uncore_pmu_attr_group = {
3637 .attrs = uncore_pmu_attrs,
3638};
3639
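/*
 * The "cpumask" attribute exported here is what lets the perf tool pick
 * the right CPU automatically: reading
 * /sys/bus/event_source/devices/uncore_<name>/cpumask yields the one
 * CPU per physical package that uncore_cpu_mask currently designates as
 * the event collector.
 */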
087bfbb0
YZ
3640static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3641{
3642 int ret;
3643
d64b25b6
SE
3644 if (!pmu->type->pmu) {
3645 pmu->pmu = (struct pmu) {
3646 .attr_groups = pmu->type->attr_groups,
3647 .task_ctx_nr = perf_invalid_context,
3648 .event_init = uncore_pmu_event_init,
3649 .add = uncore_pmu_event_add,
3650 .del = uncore_pmu_event_del,
3651 .start = uncore_pmu_event_start,
3652 .stop = uncore_pmu_event_stop,
3653 .read = uncore_pmu_event_read,
3654 };
3655 } else {
3656 pmu->pmu = *pmu->type->pmu;
3657 pmu->pmu.attr_groups = pmu->type->attr_groups;
3658 }
087bfbb0
YZ
3659
3660 if (pmu->type->num_boxes == 1) {
3661 if (strlen(pmu->type->name) > 0)
3662 sprintf(pmu->name, "uncore_%s", pmu->type->name);
3663 else
3664 sprintf(pmu->name, "uncore");
3665 } else {
3666 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3667 pmu->pmu_idx);
3668 }
3669
3670 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3671 return ret;
3672}
3673
3674static void __init uncore_type_exit(struct intel_uncore_type *type)
3675{
3676 int i;
3677
3678 for (i = 0; i < type->num_boxes; i++)
3679 free_percpu(type->pmus[i].box);
3680 kfree(type->pmus);
3681 type->pmus = NULL;
314d9f63
YZ
3682 kfree(type->events_group);
3683 type->events_group = NULL;
087bfbb0
YZ
3684}
3685
cffa59ba 3686static void __init uncore_types_exit(struct intel_uncore_type **types)
14371cce
YZ
3687{
3688 int i;
3689 for (i = 0; types[i]; i++)
3690 uncore_type_exit(types[i]);
3691}
3692
087bfbb0
YZ
3693static int __init uncore_type_init(struct intel_uncore_type *type)
3694{
3695 struct intel_uncore_pmu *pmus;
1b0dac2a 3696 struct attribute_group *attr_group;
087bfbb0
YZ
3697 struct attribute **attrs;
3698 int i, j;
3699
3700 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3701 if (!pmus)
3702 return -ENOMEM;
3703
b7b4839d
DJ
3704 type->pmus = pmus;
3705
087bfbb0
YZ
3706 type->unconstrainted = (struct event_constraint)
3707 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
9fac2cf3 3708 0, type->num_counters, 0, 0);
087bfbb0
YZ
3709
3710 for (i = 0; i < type->num_boxes; i++) {
3711 pmus[i].func_id = -1;
3712 pmus[i].pmu_idx = i;
3713 pmus[i].type = type;
14371cce 3714 INIT_LIST_HEAD(&pmus[i].box_list);
087bfbb0
YZ
3715 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3716 if (!pmus[i].box)
3717 goto fail;
3718 }
3719
3720 if (type->event_descs) {
3721 i = 0;
3722 while (type->event_descs[i].attr.attr.name)
3723 i++;
3724
1b0dac2a
JSM
3725 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3726 sizeof(*attr_group), GFP_KERNEL);
3727 if (!attr_group)
087bfbb0
YZ
3728 goto fail;
3729
1b0dac2a
JSM
3730 attrs = (struct attribute **)(attr_group + 1);
3731 attr_group->name = "events";
3732 attr_group->attrs = attrs;
087bfbb0
YZ
3733
3734 for (j = 0; j < i; j++)
3735 attrs[j] = &type->event_descs[j].attr.attr;
3736
1b0dac2a 3737 type->events_group = attr_group;
087bfbb0
YZ
3738 }
3739
314d9f63 3740 type->pmu_group = &uncore_pmu_attr_group;
087bfbb0
YZ
3741 return 0;
3742fail:
3743 uncore_type_exit(type);
3744 return -ENOMEM;
3745}
3746
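/*
 * Note the single-allocation trick above: the attribute_group and its
 * NULL-terminated attrs[] pointer array are carved out of one
 * kzalloc(), with the array starting right after the struct
 * (attr_group + 1), so uncore_type_exit() can free both with a single
 * kfree(type->events_group).
 */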
3747static int __init uncore_types_init(struct intel_uncore_type **types)
3748{
3749 int i, ret;
3750
3751 for (i = 0; types[i]; i++) {
3752 ret = uncore_type_init(types[i]);
3753 if (ret)
3754 goto fail;
3755 }
3756 return 0;
3757fail:
3758 while (--i >= 0)
3759 uncore_type_exit(types[i]);
3760 return ret;
3761}
3762
14371cce
YZ
3763static struct pci_driver *uncore_pci_driver;
3764static bool pcidrv_registered;
3765
3766/*
3767 * add a pci uncore device
3768 */
899396cf 3769static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
14371cce
YZ
3770{
3771 struct intel_uncore_pmu *pmu;
3772 struct intel_uncore_box *box;
899396cf
YZ
3773 struct intel_uncore_type *type;
3774 int phys_id;
14371cce
YZ
3775
3776 phys_id = pcibus_to_physid[pdev->bus->number];
3777 if (phys_id < 0)
3778 return -ENODEV;
3779
899396cf
YZ
3780 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3781 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3782 pci_set_drvdata(pdev, NULL);
3783 return 0;
3784 }
3785
3786 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
73c4427c 3787 box = uncore_alloc_box(type, NUMA_NO_NODE);
14371cce
YZ
3788 if (!box)
3789 return -ENOMEM;
3790
3791 /*
3792	 * for performance monitoring units with multiple boxes,
3793 * each box has a different function id.
3794 */
899396cf
YZ
3795 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3796 if (pmu->func_id < 0)
3797 pmu->func_id = pdev->devfn;
3798 else
3799 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
14371cce
YZ
3800
3801 box->phys_id = phys_id;
3802 box->pci_dev = pdev;
3803 box->pmu = pmu;
3804 uncore_box_init(box);
3805 pci_set_drvdata(pdev, box);
3806
3807 raw_spin_lock(&uncore_box_lock);
3808 list_add_tail(&box->list, &pmu->box_list);
3809 raw_spin_unlock(&uncore_box_lock);
3810
3811 return 0;
3812}
3813
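/*
 * id->driver_data is a small packed descriptor: UNCORE_PCI_DEV_TYPE()
 * selects the entry in pci_uncores[] (or UNCORE_EXTRA_PCI_DEV for
 * devices kept only as helpers in extra_pci_dev[]), and
 * UNCORE_PCI_DEV_IDX() selects which box/pmu of that type the PCI
 * function backs.
 */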
357398e9 3814static void uncore_pci_remove(struct pci_dev *pdev)
14371cce
YZ
3815{
3816	struct intel_uncore_box *box;
899396cf
YZ
3817 struct intel_uncore_pmu *pmu;
3818 int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3819
3820 box = pci_get_drvdata(pdev);
3821 if (!box) {
3822 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3823 if (extra_pci_dev[phys_id][i] == pdev) {
3824 extra_pci_dev[phys_id][i] = NULL;
3825 break;
3826 }
3827 }
3828 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3829 return;
3830 }
14371cce 3831
899396cf 3832 pmu = box->pmu;
14371cce
YZ
3833 if (WARN_ON_ONCE(phys_id != box->phys_id))
3834 return;
3835
e850f9c3
YZ
3836 pci_set_drvdata(pdev, NULL);
3837
14371cce
YZ
3838 raw_spin_lock(&uncore_box_lock);
3839 list_del(&box->list);
3840 raw_spin_unlock(&uncore_box_lock);
3841
3842 for_each_possible_cpu(cpu) {
3843 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3844 *per_cpu_ptr(pmu->box, cpu) = NULL;
3845 atomic_dec(&box->refcnt);
3846 }
3847 }
3848
3849 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3850 kfree(box);
3851}
3852
14371cce
YZ
3853static int __init uncore_pci_init(void)
3854{
3855 int ret;
3856
3857 switch (boot_cpu_data.x86_model) {
7c94ee2e 3858 case 45: /* Sandy Bridge-EP */
e850f9c3 3859 ret = snbep_pci2phy_map_init(0x3ce0);
032c3851
YZ
3860 if (ret)
3861 return ret;
7c94ee2e
YZ
3862 pci_uncores = snbep_pci_uncores;
3863 uncore_pci_driver = &snbep_uncore_pci_driver;
7c94ee2e 3864 break;
e850f9c3
YZ
3865 case 62: /* IvyTown */
3866 ret = snbep_pci2phy_map_init(0x0e1e);
3867 if (ret)
3868 return ret;
3869 pci_uncores = ivt_pci_uncores;
3870 uncore_pci_driver = &ivt_uncore_pci_driver;
3871 break;
b9e1ab6d
SE
3872 case 42: /* Sandy Bridge */
3873 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
3874 if (ret)
3875 return ret;
3876 pci_uncores = snb_pci_uncores;
3877 uncore_pci_driver = &snb_uncore_pci_driver;
3878 break;
3879 case 58: /* Ivy Bridge */
3880 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
3881 if (ret)
3882 return ret;
3883 pci_uncores = snb_pci_uncores;
3884 uncore_pci_driver = &ivb_uncore_pci_driver;
3885 break;
3886 case 60: /* Haswell */
3887 case 69: /* Haswell Celeron */
3888 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
3889 if (ret)
3890 return ret;
3891 pci_uncores = snb_pci_uncores;
3892 uncore_pci_driver = &hsw_uncore_pci_driver;
3893 break;
14371cce
YZ
3894 default:
3895 return 0;
3896 }
3897
3898 ret = uncore_types_init(pci_uncores);
3899 if (ret)
3900 return ret;
3901
3902 uncore_pci_driver->probe = uncore_pci_probe;
3903 uncore_pci_driver->remove = uncore_pci_remove;
3904
3905 ret = pci_register_driver(uncore_pci_driver);
3906 if (ret == 0)
3907 pcidrv_registered = true;
3908 else
3909 uncore_types_exit(pci_uncores);
3910
3911 return ret;
3912}
3913
3914static void __init uncore_pci_exit(void)
3915{
3916 if (pcidrv_registered) {
3917 pcidrv_registered = false;
3918 pci_unregister_driver(uncore_pci_driver);
3919 uncore_types_exit(pci_uncores);
3920 }
3921}
3922
22cc4ccf
YZ
3923/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3924static LIST_HEAD(boxes_to_free);
3925
148f9bb8 3926static void uncore_kfree_boxes(void)
22cc4ccf
YZ
3927{
3928 struct intel_uncore_box *box;
3929
3930 while (!list_empty(&boxes_to_free)) {
3931 box = list_entry(boxes_to_free.next,
3932 struct intel_uncore_box, list);
3933 list_del(&box->list);
3934 kfree(box);
3935 }
3936}
3937
148f9bb8 3938static void uncore_cpu_dying(int cpu)
087bfbb0
YZ
3939{
3940 struct intel_uncore_type *type;
3941 struct intel_uncore_pmu *pmu;
3942 struct intel_uncore_box *box;
3943 int i, j;
3944
3945 for (i = 0; msr_uncores[i]; i++) {
3946 type = msr_uncores[i];
3947 for (j = 0; j < type->num_boxes; j++) {
3948 pmu = &type->pmus[j];
3949 box = *per_cpu_ptr(pmu->box, cpu);
3950 *per_cpu_ptr(pmu->box, cpu) = NULL;
3951 if (box && atomic_dec_and_test(&box->refcnt))
22cc4ccf 3952 list_add(&box->list, &boxes_to_free);
087bfbb0
YZ
3953 }
3954 }
3955}
3956
148f9bb8 3957static int uncore_cpu_starting(int cpu)
087bfbb0
YZ
3958{
3959 struct intel_uncore_type *type;
3960 struct intel_uncore_pmu *pmu;
3961 struct intel_uncore_box *box, *exist;
3962 int i, j, k, phys_id;
3963
3964 phys_id = topology_physical_package_id(cpu);
3965
3966 for (i = 0; msr_uncores[i]; i++) {
3967 type = msr_uncores[i];
3968 for (j = 0; j < type->num_boxes; j++) {
3969 pmu = &type->pmus[j];
3970 box = *per_cpu_ptr(pmu->box, cpu);
3971			/* already prepared with a valid phys_id by uncore_cpumask_init()? */
3972 if (box && box->phys_id >= 0) {
3973 uncore_box_init(box);
3974 continue;
3975 }
3976
3977 for_each_online_cpu(k) {
3978 exist = *per_cpu_ptr(pmu->box, k);
3979 if (exist && exist->phys_id == phys_id) {
3980 atomic_inc(&exist->refcnt);
3981 *per_cpu_ptr(pmu->box, cpu) = exist;
22cc4ccf
YZ
3982 if (box) {
3983 list_add(&box->list,
3984 &boxes_to_free);
3985 box = NULL;
3986 }
087bfbb0
YZ
3987 break;
3988 }
3989 }
3990
3991 if (box) {
3992 box->phys_id = phys_id;
3993 uncore_box_init(box);
3994 }
3995 }
3996 }
3997 return 0;
3998}
3999
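/*
 * Boxes are shared per physical package: the first CPU of a package to
 * come up keeps the box allocated for it in uncore_cpu_prepare(), while
 * every later sibling drops its own pre-allocated box (onto
 * boxes_to_free) and takes a reference on the existing one instead.
 */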
148f9bb8 4000static int uncore_cpu_prepare(int cpu, int phys_id)
087bfbb0
YZ
4001{
4002 struct intel_uncore_type *type;
4003 struct intel_uncore_pmu *pmu;
4004 struct intel_uncore_box *box;
4005 int i, j;
4006
4007 for (i = 0; msr_uncores[i]; i++) {
4008 type = msr_uncores[i];
4009 for (j = 0; j < type->num_boxes; j++) {
4010 pmu = &type->pmus[j];
4011 if (pmu->func_id < 0)
4012 pmu->func_id = j;
4013
73c4427c 4014 box = uncore_alloc_box(type, cpu_to_node(cpu));
087bfbb0
YZ
4015 if (!box)
4016 return -ENOMEM;
4017
4018 box->pmu = pmu;
4019 box->phys_id = phys_id;
4020 *per_cpu_ptr(pmu->box, cpu) = box;
4021 }
4022 }
4023 return 0;
4024}
4025
148f9bb8 4026static void
254298c7 4027uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
087bfbb0
YZ
4028{
4029 struct intel_uncore_type *type;
4030 struct intel_uncore_pmu *pmu;
4031 struct intel_uncore_box *box;
4032 int i, j;
4033
4034 for (i = 0; uncores[i]; i++) {
4035 type = uncores[i];
4036 for (j = 0; j < type->num_boxes; j++) {
4037 pmu = &type->pmus[j];
4038 if (old_cpu < 0)
4039 box = uncore_pmu_to_box(pmu, new_cpu);
4040 else
4041 box = uncore_pmu_to_box(pmu, old_cpu);
4042 if (!box)
4043 continue;
4044
4045 if (old_cpu < 0) {
4046 WARN_ON_ONCE(box->cpu != -1);
4047 box->cpu = new_cpu;
4048 continue;
4049 }
4050
4051 WARN_ON_ONCE(box->cpu != old_cpu);
4052 if (new_cpu >= 0) {
4053 uncore_pmu_cancel_hrtimer(box);
4054 perf_pmu_migrate_context(&pmu->pmu,
4055 old_cpu, new_cpu);
4056 box->cpu = new_cpu;
4057 } else {
4058 box->cpu = -1;
4059 }
4060 }
4061 }
4062}
4063
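/*
 * uncore_change_context() is the hotplug hand-off: with old_cpu < 0 it
 * merely claims the boxes for new_cpu; otherwise it cancels the polling
 * hrtimer and lets perf_pmu_migrate_context() move the events over, so
 * a package keeps counting while its collector CPU goes away.
 */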
148f9bb8 4064static void uncore_event_exit_cpu(int cpu)
087bfbb0
YZ
4065{
4066 int i, phys_id, target;
4067
4068 /* if exiting cpu is used for collecting uncore events */
4069 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
4070 return;
4071
4072 /* find a new cpu to collect uncore events */
4073 phys_id = topology_physical_package_id(cpu);
4074 target = -1;
4075 for_each_online_cpu(i) {
4076 if (i == cpu)
4077 continue;
4078 if (phys_id == topology_physical_package_id(i)) {
4079 target = i;
4080 break;
4081 }
4082 }
4083
4084 /* migrate uncore events to the new cpu */
4085 if (target >= 0)
4086 cpumask_set_cpu(target, &uncore_cpu_mask);
4087
4088 uncore_change_context(msr_uncores, cpu, target);
14371cce 4089 uncore_change_context(pci_uncores, cpu, target);
087bfbb0
YZ
4090}
4091
148f9bb8 4092static void uncore_event_init_cpu(int cpu)
087bfbb0
YZ
4093{
4094 int i, phys_id;
4095
4096 phys_id = topology_physical_package_id(cpu);
4097 for_each_cpu(i, &uncore_cpu_mask) {
4098 if (phys_id == topology_physical_package_id(i))
4099 return;
4100 }
4101
4102 cpumask_set_cpu(cpu, &uncore_cpu_mask);
4103
4104 uncore_change_context(msr_uncores, -1, cpu);
14371cce 4105 uncore_change_context(pci_uncores, -1, cpu);
087bfbb0
YZ
4106}
4107
148f9bb8
PG
4108static int uncore_cpu_notifier(struct notifier_block *self,
4109 unsigned long action, void *hcpu)
087bfbb0
YZ
4110{
4111 unsigned int cpu = (long)hcpu;
4112
4113 /* allocate/free data structure for uncore box */
4114 switch (action & ~CPU_TASKS_FROZEN) {
4115 case CPU_UP_PREPARE:
4116 uncore_cpu_prepare(cpu, -1);
4117 break;
4118 case CPU_STARTING:
4119 uncore_cpu_starting(cpu);
4120 break;
4121 case CPU_UP_CANCELED:
4122 case CPU_DYING:
4123 uncore_cpu_dying(cpu);
4124 break;
22cc4ccf
YZ
4125 case CPU_ONLINE:
4126 case CPU_DEAD:
4127 uncore_kfree_boxes();
4128 break;
087bfbb0
YZ
4129 default:
4130 break;
4131 }
4132
4133 /* select the cpu that collects uncore events */
4134 switch (action & ~CPU_TASKS_FROZEN) {
4135 case CPU_DOWN_FAILED:
4136 case CPU_STARTING:
4137 uncore_event_init_cpu(cpu);
4138 break;
4139 case CPU_DOWN_PREPARE:
4140 uncore_event_exit_cpu(cpu);
4141 break;
4142 default:
4143 break;
4144 }
4145
4146 return NOTIFY_OK;
4147}
4148
148f9bb8 4149static struct notifier_block uncore_cpu_nb = {
254298c7 4150 .notifier_call = uncore_cpu_notifier,
087bfbb0
YZ
4151 /*
4152 * to migrate uncore events, our notifier should be executed
4153 * before perf core's notifier.
4154 */
254298c7 4155 .priority = CPU_PRI_PERF + 1,
087bfbb0
YZ
4156};
4157
4158static void __init uncore_cpu_setup(void *dummy)
4159{
4160 uncore_cpu_starting(smp_processor_id());
4161}
4162
4163static int __init uncore_cpu_init(void)
4164{
411cf180 4165 int ret, max_cores;
087bfbb0 4166
42089697 4167 max_cores = boot_cpu_data.x86_max_cores;
087bfbb0 4168 switch (boot_cpu_data.x86_model) {
fcde10e9
YZ
4169 case 26: /* Nehalem */
4170 case 30:
4171 case 37: /* Westmere */
4172 case 44:
4173 msr_uncores = nhm_msr_uncores;
4174 break;
4175 case 42: /* Sandy Bridge */
9a6bc143 4176 case 58: /* Ivy Bridge */
42089697
YZ
4177 if (snb_uncore_cbox.num_boxes > max_cores)
4178 snb_uncore_cbox.num_boxes = max_cores;
fcde10e9
YZ
4179 msr_uncores = snb_msr_uncores;
4180 break;
80e217e9 4181 case 45: /* Sandy Bridge-EP */
42089697
YZ
4182 if (snbep_uncore_cbox.num_boxes > max_cores)
4183 snbep_uncore_cbox.num_boxes = max_cores;
7c94ee2e
YZ
4184 msr_uncores = snbep_msr_uncores;
4185 break;
cb37af77
YZ
4186 case 46: /* Nehalem-EX */
4187 uncore_nhmex = true;
4188 case 47: /* Westmere-EX aka. Xeon E7 */
4189 if (!uncore_nhmex)
4190 nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
4191 if (nhmex_uncore_cbox.num_boxes > max_cores)
4192 nhmex_uncore_cbox.num_boxes = max_cores;
254298c7
YZ
4193 msr_uncores = nhmex_msr_uncores;
4194 break;
e850f9c3
YZ
4195 case 62: /* IvyTown */
4196 if (ivt_uncore_cbox.num_boxes > max_cores)
4197 ivt_uncore_cbox.num_boxes = max_cores;
4198 msr_uncores = ivt_msr_uncores;
4199 break;
4200
087bfbb0
YZ
4201 default:
4202 return 0;
4203 }
4204
4205 ret = uncore_types_init(msr_uncores);
4206 if (ret)
4207 return ret;
4208
087bfbb0
YZ
4209 return 0;
4210}
4211
4212static int __init uncore_pmus_register(void)
4213{
4214 struct intel_uncore_pmu *pmu;
4215 struct intel_uncore_type *type;
4216 int i, j;
4217
4218 for (i = 0; msr_uncores[i]; i++) {
4219 type = msr_uncores[i];
4220 for (j = 0; j < type->num_boxes; j++) {
4221 pmu = &type->pmus[j];
4222 uncore_pmu_register(pmu);
4223 }
4224 }
4225
14371cce
YZ
4226 for (i = 0; pci_uncores[i]; i++) {
4227 type = pci_uncores[i];
4228 for (j = 0; j < type->num_boxes; j++) {
4229 pmu = &type->pmus[j];
4230 uncore_pmu_register(pmu);
4231 }
4232 }
4233
087bfbb0
YZ
4234 return 0;
4235}
4236
ef11dadb 4237static void __init uncore_cpumask_init(void)
411cf180
SE
4238{
4239 int cpu;
4240
4241 /*
4242	 * only invoke once, from the msr or pci init code
4243 */
4244 if (!cpumask_empty(&uncore_cpu_mask))
4245 return;
4246
467a9e16 4247 cpu_notifier_register_begin();
411cf180
SE
4248
4249 for_each_online_cpu(cpu) {
4250 int i, phys_id = topology_physical_package_id(cpu);
4251
4252 for_each_cpu(i, &uncore_cpu_mask) {
4253 if (phys_id == topology_physical_package_id(i)) {
4254 phys_id = -1;
4255 break;
4256 }
4257 }
4258 if (phys_id < 0)
4259 continue;
4260
4261 uncore_cpu_prepare(cpu, phys_id);
4262 uncore_event_init_cpu(cpu);
4263 }
4264 on_each_cpu(uncore_cpu_setup, NULL, 1);
4265
467a9e16 4266 __register_cpu_notifier(&uncore_cpu_nb);
411cf180 4267
467a9e16 4268 cpu_notifier_register_done();
411cf180
SE
4269}
4270
4271
087bfbb0
YZ
4272static int __init intel_uncore_init(void)
4273{
4274 int ret;
4275
4276 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
4277 return -ENODEV;
4278
a05123bd
YZ
4279 if (cpu_has_hypervisor)
4280 return -ENODEV;
4281
14371cce 4282 ret = uncore_pci_init();
087bfbb0
YZ
4283 if (ret)
4284 goto fail;
14371cce
YZ
4285 ret = uncore_cpu_init();
4286 if (ret) {
4287 uncore_pci_exit();
4288 goto fail;
4289 }
411cf180 4290 uncore_cpumask_init();
087bfbb0
YZ
4291
4292 uncore_pmus_register();
4293 return 0;
4294fail:
4295 return ret;
4296}
4297device_initcall(intel_uncore_init);