arch/x86/kernel/cpu/perf_event_intel_uncore.c
1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
11static DEFINE_RAW_SPINLOCK(uncore_box_lock);
12
13/* mask of cpus that collect uncore events */
14static cpumask_t uncore_cpu_mask;
15
16/* constraint for the fixed counter */
17static struct event_constraint constraint_fixed =
18 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
19static struct event_constraint constraint_empty =
20 EVENT_CONSTRAINT(0, 0, 0);
21
22#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
23 ((1ULL << (n)) - 1)))
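/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit-wide field from x.
 * Illustrative example (made-up value): __BITS_VALUE(0x00c1d2, 1, 8) == 0xc1,
 * i.e. the second 8-bit field. It is used below to pull per-index reference
 * counts out of the packed counter in struct intel_uncore_extra_reg.
 */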
24
25DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
26DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
27DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
29DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
30DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
33DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
38DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
39DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
40DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
41DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
42DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
43DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
44DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
45DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
46DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
68
69static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
70{
71 u64 count;
72
73 rdmsrl(event->hw.event_base, count);
74
75 return count;
76}
77
78/*
79 * generic get constraint function for shared match/mask registers.
80 */
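/*
 * The shared register is reference counted per box: the first event to claim
 * it programs config1/config2, and later events may share it only if they
 * request exactly the same match/mask values. The fake-box path (typically
 * used while validating an event group) skips the reg->alloc bookkeeping.
 */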
81static struct event_constraint *
82uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
83{
84 struct intel_uncore_extra_reg *er;
85 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
86 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
87 unsigned long flags;
88 bool ok = false;
89
90 /*
91 * reg->alloc can be set due to existing state, so for fake box we
92 * need to ignore this, otherwise we might fail to allocate proper
93 * fake state for this extra reg constraint.
94 */
95 if (reg1->idx == EXTRA_REG_NONE ||
96 (!uncore_box_is_fake(box) && reg1->alloc))
97 return NULL;
98
99 er = &box->shared_regs[reg1->idx];
100 raw_spin_lock_irqsave(&er->lock, flags);
101 if (!atomic_read(&er->ref) ||
102 (er->config1 == reg1->config && er->config2 == reg2->config)) {
103 atomic_inc(&er->ref);
104 er->config1 = reg1->config;
105 er->config2 = reg2->config;
106 ok = true;
107 }
108 raw_spin_unlock_irqrestore(&er->lock, flags);
109
110 if (ok) {
111 if (!uncore_box_is_fake(box))
112 reg1->alloc = 1;
113 return NULL;
114 }
115
116 return &constraint_empty;
117}
118
119static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
120{
121 struct intel_uncore_extra_reg *er;
122 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
123
124 /*
125 * Only put constraint if extra reg was actually allocated. Also
126 * takes care of events which do not use an extra shared reg.
127 *
128 * Also, if this is a fake box we shouldn't touch any event state
129 * (reg->alloc) and we don't care about leaving inconsistent box
130 * state either since it will be thrown out.
131 */
132 if (uncore_box_is_fake(box) || !reg1->alloc)
133 return;
134
135 er = &box->shared_regs[reg1->idx];
136 atomic_dec(&er->ref);
137 reg1->alloc = 0;
138}
139
140static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
141{
142 struct intel_uncore_extra_reg *er;
143 unsigned long flags;
144 u64 config;
145
146 er = &box->shared_regs[idx];
147
148 raw_spin_lock_irqsave(&er->lock, flags);
149 config = er->config;
150 raw_spin_unlock_irqrestore(&er->lock, flags);
151
152 return config;
153}
154
155/* Sandy Bridge-EP uncore support */
156static struct intel_uncore_type snbep_uncore_cbox;
157static struct intel_uncore_type snbep_uncore_pcu;
158
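/*
 * PCI-attached PMON boxes are frozen and unfrozen by toggling the
 * SNBEP_PMON_BOX_CTL_FRZ bit in the per-box control register, as a
 * read-modify-write of the box control dword in the two helpers below.
 */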
159static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
160{
161 struct pci_dev *pdev = box->pci_dev;
162 int box_ctl = uncore_pci_box_ctl(box);
163 u32 config = 0;
164
165 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
166 config |= SNBEP_PMON_BOX_CTL_FRZ;
167 pci_write_config_dword(pdev, box_ctl, config);
168 }
169}
170
171static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
172{
173 struct pci_dev *pdev = box->pci_dev;
174 int box_ctl = uncore_pci_box_ctl(box);
175 u32 config = 0;
176
177 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
178 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
179 pci_write_config_dword(pdev, box_ctl, config);
180 }
181}
182
183static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
184{
185 struct pci_dev *pdev = box->pci_dev;
186 struct hw_perf_event *hwc = &event->hw;
187
188 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
189}
190
191static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
192{
193 struct pci_dev *pdev = box->pci_dev;
194 struct hw_perf_event *hwc = &event->hw;
195
196 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
197}
198
199static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
200{
201 struct pci_dev *pdev = box->pci_dev;
202 struct hw_perf_event *hwc = &event->hw;
203 u64 count = 0;
204
205 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
206 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
207
208 return count;
209}
210
211static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
212{
213 struct pci_dev *pdev = box->pci_dev;
214
215 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
216}
217
218static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
219{
220 u64 config;
221 unsigned msr;
222
223 msr = uncore_msr_box_ctl(box);
224 if (msr) {
225 rdmsrl(msr, config);
226 config |= SNBEP_PMON_BOX_CTL_FRZ;
227 wrmsrl(msr, config);
228 }
229}
230
231static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
232{
233 u64 config;
234 unsigned msr;
235
236 msr = uncore_msr_box_ctl(box);
237 if (msr) {
238 rdmsrl(msr, config);
239 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
240 wrmsrl(msr, config);
241 }
242}
243
244static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
245{
246 struct hw_perf_event *hwc = &event->hw;
247 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
248
249 if (reg1->idx != EXTRA_REG_NONE)
250 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
251
252 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
253}
254
255static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
256 struct perf_event *event)
257{
258 struct hw_perf_event *hwc = &event->hw;
259
260 wrmsrl(hwc->config_base, hwc->config);
261}
262
263static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
264{
265 unsigned msr = uncore_msr_box_ctl(box);
266
267 if (msr)
268 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
269}
270
271static struct attribute *snbep_uncore_formats_attr[] = {
272 &format_attr_event.attr,
273 &format_attr_umask.attr,
274 &format_attr_edge.attr,
275 &format_attr_inv.attr,
276 &format_attr_thresh8.attr,
277 NULL,
278};
279
280static struct attribute *snbep_uncore_ubox_formats_attr[] = {
281 &format_attr_event.attr,
282 &format_attr_umask.attr,
283 &format_attr_edge.attr,
284 &format_attr_inv.attr,
285 &format_attr_thresh5.attr,
286 NULL,
287};
288
289static struct attribute *snbep_uncore_cbox_formats_attr[] = {
290 &format_attr_event.attr,
291 &format_attr_umask.attr,
292 &format_attr_edge.attr,
293 &format_attr_tid_en.attr,
294 &format_attr_inv.attr,
295 &format_attr_thresh8.attr,
296 &format_attr_filter_tid.attr,
297 &format_attr_filter_nid.attr,
298 &format_attr_filter_state.attr,
299 &format_attr_filter_opc.attr,
300 NULL,
301};
302
303static struct attribute *snbep_uncore_pcu_formats_attr[] = {
304 &format_attr_event_ext.attr,
305 &format_attr_occ_sel.attr,
306 &format_attr_edge.attr,
307 &format_attr_inv.attr,
308 &format_attr_thresh5.attr,
309 &format_attr_occ_invert.attr,
310 &format_attr_occ_edge.attr,
311 &format_attr_filter_band0.attr,
312 &format_attr_filter_band1.attr,
313 &format_attr_filter_band2.attr,
314 &format_attr_filter_band3.attr,
315 NULL,
316};
317
318static struct attribute *snbep_uncore_qpi_formats_attr[] = {
319 &format_attr_event_ext.attr,
320 &format_attr_umask.attr,
321 &format_attr_edge.attr,
322 &format_attr_inv.attr,
323 &format_attr_thresh8.attr,
324 &format_attr_match_rds.attr,
325 &format_attr_match_rnid30.attr,
326 &format_attr_match_rnid4.attr,
327 &format_attr_match_dnid.attr,
328 &format_attr_match_mc.attr,
329 &format_attr_match_opc.attr,
330 &format_attr_match_vnw.attr,
331 &format_attr_match0.attr,
332 &format_attr_match1.attr,
333 &format_attr_mask_rds.attr,
334 &format_attr_mask_rnid30.attr,
335 &format_attr_mask_rnid4.attr,
336 &format_attr_mask_dnid.attr,
337 &format_attr_mask_mc.attr,
338 &format_attr_mask_opc.attr,
339 &format_attr_mask_vnw.attr,
340 &format_attr_mask0.attr,
341 &format_attr_mask1.attr,
342 NULL,
343};
344
345static struct uncore_event_desc snbep_uncore_imc_events[] = {
346 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
347 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
348 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
349 { /* end: all zeroes */ },
350};
351
352static struct uncore_event_desc snbep_uncore_qpi_events[] = {
353 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
354 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
355 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
356 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
357 { /* end: all zeroes */ },
358};
359
360static struct attribute_group snbep_uncore_format_group = {
361 .name = "format",
362 .attrs = snbep_uncore_formats_attr,
363};
364
365static struct attribute_group snbep_uncore_ubox_format_group = {
366 .name = "format",
367 .attrs = snbep_uncore_ubox_formats_attr,
368};
369
370static struct attribute_group snbep_uncore_cbox_format_group = {
371 .name = "format",
372 .attrs = snbep_uncore_cbox_formats_attr,
373};
374
375static struct attribute_group snbep_uncore_pcu_format_group = {
376 .name = "format",
377 .attrs = snbep_uncore_pcu_formats_attr,
378};
379
380static struct attribute_group snbep_uncore_qpi_format_group = {
381 .name = "format",
382 .attrs = snbep_uncore_qpi_formats_attr,
383};
384
385#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
386 .init_box = snbep_uncore_msr_init_box, \
387 .disable_box = snbep_uncore_msr_disable_box, \
388 .enable_box = snbep_uncore_msr_enable_box, \
389 .disable_event = snbep_uncore_msr_disable_event, \
390 .enable_event = snbep_uncore_msr_enable_event, \
391 .read_counter = uncore_msr_read_counter
392
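/*
 * The *_OPS_COMMON_INIT() macros only fill in the callbacks that are shared
 * across box types; per-box ops structures (e.g. the C-Box and PCU ops
 * further down) add their own hw_config/get_constraint/put_constraint
 * handlers on top of this common set.
 */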
393static struct intel_uncore_ops snbep_uncore_msr_ops = {
394 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
395};
396
397#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
398 .init_box = snbep_uncore_pci_init_box, \
399 .disable_box = snbep_uncore_pci_disable_box, \
400 .enable_box = snbep_uncore_pci_enable_box, \
401 .disable_event = snbep_uncore_pci_disable_event, \
402 .read_counter = snbep_uncore_pci_read_counter
403
404static struct intel_uncore_ops snbep_uncore_pci_ops = {
405 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
406 .enable_event = snbep_uncore_pci_enable_event,
407};
408
409static struct event_constraint snbep_uncore_cbox_constraints[] = {
410 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
411 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
412 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
413 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
414 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
415 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
416 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
417 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
418 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
419 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
420 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
421 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
422 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
423 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
424 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
429 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
430 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
431 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
432 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
433 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
434 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
435 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
436 EVENT_CONSTRAINT_END
437};
438
439static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
440 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
441 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
442 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
443 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
444 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
445 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
446 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
447 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
448 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
449 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
450 EVENT_CONSTRAINT_END
451};
452
453static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
454 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
455 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
456 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
457 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
458 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
459 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
460 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
461 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
462 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
463 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
464 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
465 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
466 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
467 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
468 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
469 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
470 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
471 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
472 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
473 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
474 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
475 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
476 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
477 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
478 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
479 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
480 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
481 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
482 EVENT_CONSTRAINT_END
483};
484
485static struct intel_uncore_type snbep_uncore_ubox = {
486 .name = "ubox",
487 .num_counters = 2,
488 .num_boxes = 1,
489 .perf_ctr_bits = 44,
490 .fixed_ctr_bits = 48,
491 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
492 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
493 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
494 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
495 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
496 .ops = &snbep_uncore_msr_ops,
497 .format_group = &snbep_uncore_ubox_format_group,
498};
499
500static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
501 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
502 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
503 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
504 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
505 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
506 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
507 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
508 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
509 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
510 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
511 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
512 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
513 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
514 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
515 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
516 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
517 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
518 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
519 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
520 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
521 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
522 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
523 EVENT_EXTRA_END
524};
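/*
 * Each entry above maps an event code/umask pair to the set of C-Box filter
 * fields it depends on; the idx value is a bit mask decoded by
 * snbep_cbox_filter_mask() below. For example, idx 0x4 marks an event that
 * needs the STATE filter field, and idx 0xc one that needs both the STATE
 * and OPC fields.
 */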
525
526static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
527{
528 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
529 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
530 int i;
531
532 if (uncore_box_is_fake(box))
533 return;
534
535 for (i = 0; i < 5; i++) {
536 if (reg1->alloc & (0x1 << i))
537 atomic_sub(1 << (i * 6), &er->ref);
538 }
539 reg1->alloc = 0;
540}
541
542static struct event_constraint *
543__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
544 u64 (*cbox_filter_mask)(int fields))
545{
546 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
547 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
548 int i, alloc = 0;
549 unsigned long flags;
550 u64 mask;
551
552 if (reg1->idx == EXTRA_REG_NONE)
553 return NULL;
554
555 raw_spin_lock_irqsave(&er->lock, flags);
556 for (i = 0; i < 5; i++) {
557 if (!(reg1->idx & (0x1 << i)))
558 continue;
559 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
560 continue;
561
562 mask = cbox_filter_mask(0x1 << i);
563 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
564 !((reg1->config ^ er->config) & mask)) {
565 atomic_add(1 << (i * 6), &er->ref);
566 er->config &= ~mask;
567 er->config |= reg1->config & mask;
568 alloc |= (0x1 << i);
569 } else {
570 break;
571 }
572 }
573 raw_spin_unlock_irqrestore(&er->lock, flags);
574 if (i < 5)
575 goto fail;
576
577 if (!uncore_box_is_fake(box))
578 reg1->alloc |= alloc;
579
580 return NULL;
581fail:
582 for (; i >= 0; i--) {
583 if (alloc & (0x1 << i))
584 atomic_sub(1 << (i * 6), &er->ref);
585 }
586 return &constraint_empty;
587}
588
589static u64 snbep_cbox_filter_mask(int fields)
590{
591 u64 mask = 0;
592
593 if (fields & 0x1)
594 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
595 if (fields & 0x2)
596 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
597 if (fields & 0x4)
598 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
599 if (fields & 0x8)
600 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
601
602 return mask;
603}
604
605static struct event_constraint *
606snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
607{
608 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
609}
610
611static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
612{
613 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
614 struct extra_reg *er;
615 int idx = 0;
616
617 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
618 if (er->event != (event->hw.config & er->config_mask))
619 continue;
620 idx |= er->idx;
621 }
622
623 if (idx) {
624 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
625 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
626 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
627 reg1->idx = idx;
628 }
629 return 0;
630}
631
632static struct intel_uncore_ops snbep_uncore_cbox_ops = {
633 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
634 .hw_config = snbep_cbox_hw_config,
635 .get_constraint = snbep_cbox_get_constraint,
636 .put_constraint = snbep_cbox_put_constraint,
637};
638
639static struct intel_uncore_type snbep_uncore_cbox = {
640 .name = "cbox",
641 .num_counters = 4,
642 .num_boxes = 8,
643 .perf_ctr_bits = 44,
644 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
645 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
646 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
647 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
648 .msr_offset = SNBEP_CBO_MSR_OFFSET,
649 .num_shared_regs = 1,
650 .constraints = snbep_uncore_cbox_constraints,
651 .ops = &snbep_uncore_cbox_ops,
652 .format_group = &snbep_uncore_cbox_format_group,
653};
654
655static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
656{
657 struct hw_perf_event *hwc = &event->hw;
658 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
659 u64 config = reg1->config;
660
661 if (new_idx > reg1->idx)
662 config <<= 8 * (new_idx - reg1->idx);
663 else
664 config >>= 8 * (reg1->idx - new_idx);
665
666 if (modify) {
667 hwc->config += new_idx - reg1->idx;
668 reg1->config = config;
669 reg1->idx = new_idx;
670 }
671 return config;
672}
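/*
 * The PCU filter MSR holds four independent 8-bit occupancy band thresholds,
 * one per idx. snbep_pcu_alter_er() moves an event's band value between
 * those byte lanes: e.g. (illustrative) re-homing a filter from idx 0 to
 * idx 2 shifts its config left by 16 bits and bumps the event select in
 * hwc->config by 2.
 */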
673
674static struct event_constraint *
675snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
676{
677 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
678 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
679 unsigned long flags;
680 int idx = reg1->idx;
681 u64 mask, config1 = reg1->config;
682 bool ok = false;
683
684 if (reg1->idx == EXTRA_REG_NONE ||
685 (!uncore_box_is_fake(box) && reg1->alloc))
686 return NULL;
687again:
688 mask = 0xffULL << (idx * 8);
689 raw_spin_lock_irqsave(&er->lock, flags);
690 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
691 !((config1 ^ er->config) & mask)) {
692 atomic_add(1 << (idx * 8), &er->ref);
693 er->config &= ~mask;
694 er->config |= config1 & mask;
695 ok = true;
696 }
697 raw_spin_unlock_irqrestore(&er->lock, flags);
698
699 if (!ok) {
700 idx = (idx + 1) % 4;
701 if (idx != reg1->idx) {
702 config1 = snbep_pcu_alter_er(event, idx, false);
703 goto again;
704 }
705 return &constraint_empty;
706 }
707
708 if (!uncore_box_is_fake(box)) {
709 if (idx != reg1->idx)
710 snbep_pcu_alter_er(event, idx, true);
711 reg1->alloc = 1;
712 }
713 return NULL;
714}
715
716static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
717{
718 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
719 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
720
721 if (uncore_box_is_fake(box) || !reg1->alloc)
722 return;
723
724 atomic_sub(1 << (reg1->idx * 8), &er->ref);
725 reg1->alloc = 0;
726}
727
728static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
729{
730 struct hw_perf_event *hwc = &event->hw;
731 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
732 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
733
734 if (ev_sel >= 0xb && ev_sel <= 0xe) {
735 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
736 reg1->idx = ev_sel - 0xb;
737 reg1->config = event->attr.config1 & (0xff << reg1->idx);
738 }
739 return 0;
740}
741
742static struct intel_uncore_ops snbep_uncore_pcu_ops = {
743 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
744 .hw_config = snbep_pcu_hw_config,
745 .get_constraint = snbep_pcu_get_constraint,
746 .put_constraint = snbep_pcu_put_constraint,
747};
748
749static struct intel_uncore_type snbep_uncore_pcu = {
750 .name = "pcu",
751 .num_counters = 4,
752 .num_boxes = 1,
753 .perf_ctr_bits = 48,
754 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
755 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
756 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
757 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
758 .num_shared_regs = 1,
759 .ops = &snbep_uncore_pcu_ops,
760 .format_group = &snbep_uncore_pcu_format_group,
761};
762
763static struct intel_uncore_type *snbep_msr_uncores[] = {
764 &snbep_uncore_ubox,
765 &snbep_uncore_cbox,
766 &snbep_uncore_pcu,
767 NULL,
768};
769
770enum {
771 SNBEP_PCI_QPI_PORT0_FILTER,
772 SNBEP_PCI_QPI_PORT1_FILTER,
773};
774
775static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
776{
777 struct hw_perf_event *hwc = &event->hw;
778 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
779 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
780
781 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
782 reg1->idx = 0;
783 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
784 reg1->config = event->attr.config1;
785 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
786 reg2->config = event->attr.config2;
787 }
788 return 0;
789}
790
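/*
 * QPI match/mask values are not programmed through the counter's own PCI
 * function; they live in a separate per-port "filter" device (see the
 * 0x3c86/0x3c96 entries in snbep_uncore_pci_ids[]), which is looked up via
 * extra_pci_dev[] in snbep_qpi_enable_event() below.
 */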
791static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
792{
793 struct pci_dev *pdev = box->pci_dev;
794 struct hw_perf_event *hwc = &event->hw;
795 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
796 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
797
798 if (reg1->idx != EXTRA_REG_NONE) {
799 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
800 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
801 WARN_ON_ONCE(!filter_pdev);
802 if (filter_pdev) {
803 pci_write_config_dword(filter_pdev, reg1->reg,
804 (u32)reg1->config);
805 pci_write_config_dword(filter_pdev, reg1->reg + 4,
806 (u32)(reg1->config >> 32));
807 pci_write_config_dword(filter_pdev, reg2->reg,
808 (u32)reg2->config);
809 pci_write_config_dword(filter_pdev, reg2->reg + 4,
810 (u32)(reg2->config >> 32));
811 }
812 }
813
814 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
815}
816
817static struct intel_uncore_ops snbep_uncore_qpi_ops = {
818 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
819 .enable_event = snbep_qpi_enable_event,
820 .hw_config = snbep_qpi_hw_config,
821 .get_constraint = uncore_get_constraint,
822 .put_constraint = uncore_put_constraint,
823};
824
825#define SNBEP_UNCORE_PCI_COMMON_INIT() \
826 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
827 .event_ctl = SNBEP_PCI_PMON_CTL0, \
828 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
829 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
830 .ops = &snbep_uncore_pci_ops, \
831 .format_group = &snbep_uncore_format_group
832
833static struct intel_uncore_type snbep_uncore_ha = {
834 .name = "ha",
835 .num_counters = 4,
836 .num_boxes = 1,
837 .perf_ctr_bits = 48,
838 SNBEP_UNCORE_PCI_COMMON_INIT(),
839};
840
841static struct intel_uncore_type snbep_uncore_imc = {
842 .name = "imc",
843 .num_counters = 4,
844 .num_boxes = 4,
845 .perf_ctr_bits = 48,
846 .fixed_ctr_bits = 48,
847 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
848 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
849 .event_descs = snbep_uncore_imc_events,
850 SNBEP_UNCORE_PCI_COMMON_INIT(),
851};
852
853static struct intel_uncore_type snbep_uncore_qpi = {
854 .name = "qpi",
855 .num_counters = 4,
856 .num_boxes = 2,
857 .perf_ctr_bits = 48,
858 .perf_ctr = SNBEP_PCI_PMON_CTR0,
859 .event_ctl = SNBEP_PCI_PMON_CTL0,
860 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
861 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
862 .num_shared_regs = 1,
863 .ops = &snbep_uncore_qpi_ops,
864 .event_descs = snbep_uncore_qpi_events,
865 .format_group = &snbep_uncore_qpi_format_group,
866};
867
868
869static struct intel_uncore_type snbep_uncore_r2pcie = {
870 .name = "r2pcie",
871 .num_counters = 4,
872 .num_boxes = 1,
873 .perf_ctr_bits = 44,
874 .constraints = snbep_uncore_r2pcie_constraints,
875 SNBEP_UNCORE_PCI_COMMON_INIT(),
876};
877
878static struct intel_uncore_type snbep_uncore_r3qpi = {
879 .name = "r3qpi",
880 .num_counters = 3,
881 .num_boxes = 2,
882 .perf_ctr_bits = 44,
883 .constraints = snbep_uncore_r3qpi_constraints,
884 SNBEP_UNCORE_PCI_COMMON_INIT(),
885};
886
887enum {
888 SNBEP_PCI_UNCORE_HA,
889 SNBEP_PCI_UNCORE_IMC,
890 SNBEP_PCI_UNCORE_QPI,
891 SNBEP_PCI_UNCORE_R2PCIE,
892 SNBEP_PCI_UNCORE_R3QPI,
893};
894
895static struct intel_uncore_type *snbep_pci_uncores[] = {
896 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
897 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
898 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
899 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
900 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
901 NULL,
902};
903
904static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
905 { /* Home Agent */
906 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
907 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
908 },
909 { /* MC Channel 0 */
910 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
911 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
912 },
913 { /* MC Channel 1 */
914 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
915 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
916 },
917 { /* MC Channel 2 */
918 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
919 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
920 },
921 { /* MC Channel 3 */
922 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
923 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
924 },
925 { /* QPI Port 0 */
926 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
927 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
928 },
929 { /* QPI Port 1 */
930 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
931 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
932 },
933 { /* R2PCIe */
934 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
935 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
936 },
937 { /* R3QPI Link 0 */
938 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
939 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
940 },
941 { /* R3QPI Link 1 */
942 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
943 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
944 },
945 { /* QPI Port 0 filter */
946 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
947 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
948 SNBEP_PCI_QPI_PORT0_FILTER),
949 },
950 { /* QPI Port 0 filter */
951 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
952 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
953 SNBEP_PCI_QPI_PORT1_FILTER),
954 },
955 { /* end: all zeroes */ }
956};
957
958static struct pci_driver snbep_uncore_pci_driver = {
959 .name = "snbep_uncore",
960 .id_table = snbep_uncore_pci_ids,
961};
962
963/*
964 * build pci bus to socket mapping
965 */
966static int snbep_pci2phy_map_init(int devid)
967{
968 struct pci_dev *ubox_dev = NULL;
969 int i, bus, nodeid;
970 int err = 0;
971 u32 config = 0;
972
973 while (1) {
974 /* find the UBOX device */
975 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
976 if (!ubox_dev)
977 break;
978 bus = ubox_dev->bus->number;
979 /* get the Node ID of the local register */
980 err = pci_read_config_dword(ubox_dev, 0x40, &config);
981 if (err)
982 break;
983 nodeid = config;
984 /* get the Node ID mapping */
985 err = pci_read_config_dword(ubox_dev, 0x54, &config);
986 if (err)
987 break;
988 /*
989 * every three bits in the Node ID mapping register maps
990 * to a particular node.
991 */
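 /*
  * Illustrative decode (made-up values): with nodeid == 1 and a mapping
  * register value of 0x40, i == 2 satisfies ((0x40 >> (3 * 2)) & 0x7) == 1,
  * so this bus is assigned physical id 2.
  */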
992 for (i = 0; i < 8; i++) {
993 if (nodeid == ((config >> (3 * i)) & 0x7)) {
994 pcibus_to_physid[bus] = i;
995 break;
996 }
997 }
998 }
999
1000 if (!err) {
1001 /*
1002 * For PCI bus with no UBOX device, find the next bus
1003 * that has UBOX device and use its mapping.
1004 */
1005 i = -1;
1006 for (bus = 255; bus >= 0; bus--) {
1007 if (pcibus_to_physid[bus] >= 0)
1008 i = pcibus_to_physid[bus];
1009 else
1010 pcibus_to_physid[bus] = i;
1011 }
1012 }
1013
1014 if (ubox_dev)
1015 pci_dev_put(ubox_dev);
1016
1017 return err ? pcibios_err_to_errno(err) : 0;
1018}
1019/* end of Sandy Bridge-EP uncore support */
1020
1021/* IvyTown uncore support */
1022static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1023{
1024 unsigned msr = uncore_msr_box_ctl(box);
1025 if (msr)
1026 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1027}
1028
1029static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1030{
1031 struct pci_dev *pdev = box->pci_dev;
1032
1033 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1034}
1035
1036#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
1037 .init_box = ivt_uncore_msr_init_box, \
1038 .disable_box = snbep_uncore_msr_disable_box, \
1039 .enable_box = snbep_uncore_msr_enable_box, \
1040 .disable_event = snbep_uncore_msr_disable_event, \
1041 .enable_event = snbep_uncore_msr_enable_event, \
1042 .read_counter = uncore_msr_read_counter
1043
1044static struct intel_uncore_ops ivt_uncore_msr_ops = {
1045 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1046};
1047
1048static struct intel_uncore_ops ivt_uncore_pci_ops = {
1049 .init_box = ivt_uncore_pci_init_box,
1050 .disable_box = snbep_uncore_pci_disable_box,
1051 .enable_box = snbep_uncore_pci_enable_box,
1052 .disable_event = snbep_uncore_pci_disable_event,
1053 .enable_event = snbep_uncore_pci_enable_event,
1054 .read_counter = snbep_uncore_pci_read_counter,
1055};
1056
1057#define IVT_UNCORE_PCI_COMMON_INIT() \
1058 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1059 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1060 .event_mask = IVT_PMON_RAW_EVENT_MASK, \
1061 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1062 .ops = &ivt_uncore_pci_ops, \
1063 .format_group = &ivt_uncore_format_group
1064
1065static struct attribute *ivt_uncore_formats_attr[] = {
1066 &format_attr_event.attr,
1067 &format_attr_umask.attr,
1068 &format_attr_edge.attr,
1069 &format_attr_inv.attr,
1070 &format_attr_thresh8.attr,
1071 NULL,
1072};
1073
1074static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1075 &format_attr_event.attr,
1076 &format_attr_umask.attr,
1077 &format_attr_edge.attr,
1078 &format_attr_inv.attr,
1079 &format_attr_thresh5.attr,
1080 NULL,
1081};
1082
1083static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1084 &format_attr_event.attr,
1085 &format_attr_umask.attr,
1086 &format_attr_edge.attr,
1087 &format_attr_tid_en.attr,
1088 &format_attr_thresh8.attr,
1089 &format_attr_filter_tid.attr,
1090 &format_attr_filter_link.attr,
1091 &format_attr_filter_state2.attr,
1092 &format_attr_filter_nid2.attr,
1093 &format_attr_filter_opc2.attr,
1094 NULL,
1095};
1096
1097static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1098 &format_attr_event_ext.attr,
1099 &format_attr_occ_sel.attr,
1100 &format_attr_edge.attr,
1101 &format_attr_thresh5.attr,
1102 &format_attr_occ_invert.attr,
1103 &format_attr_occ_edge.attr,
1104 &format_attr_filter_band0.attr,
1105 &format_attr_filter_band1.attr,
1106 &format_attr_filter_band2.attr,
1107 &format_attr_filter_band3.attr,
1108 NULL,
1109};
1110
1111static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1112 &format_attr_event_ext.attr,
1113 &format_attr_umask.attr,
1114 &format_attr_edge.attr,
1115 &format_attr_thresh8.attr,
1116 &format_attr_match_rds.attr,
1117 &format_attr_match_rnid30.attr,
1118 &format_attr_match_rnid4.attr,
1119 &format_attr_match_dnid.attr,
1120 &format_attr_match_mc.attr,
1121 &format_attr_match_opc.attr,
1122 &format_attr_match_vnw.attr,
1123 &format_attr_match0.attr,
1124 &format_attr_match1.attr,
1125 &format_attr_mask_rds.attr,
1126 &format_attr_mask_rnid30.attr,
1127 &format_attr_mask_rnid4.attr,
1128 &format_attr_mask_dnid.attr,
1129 &format_attr_mask_mc.attr,
1130 &format_attr_mask_opc.attr,
1131 &format_attr_mask_vnw.attr,
1132 &format_attr_mask0.attr,
1133 &format_attr_mask1.attr,
1134 NULL,
1135};
1136
1137static struct attribute_group ivt_uncore_format_group = {
1138 .name = "format",
1139 .attrs = ivt_uncore_formats_attr,
1140};
1141
1142static struct attribute_group ivt_uncore_ubox_format_group = {
1143 .name = "format",
1144 .attrs = ivt_uncore_ubox_formats_attr,
1145};
1146
1147static struct attribute_group ivt_uncore_cbox_format_group = {
1148 .name = "format",
1149 .attrs = ivt_uncore_cbox_formats_attr,
1150};
1151
1152static struct attribute_group ivt_uncore_pcu_format_group = {
1153 .name = "format",
1154 .attrs = ivt_uncore_pcu_formats_attr,
1155};
1156
1157static struct attribute_group ivt_uncore_qpi_format_group = {
1158 .name = "format",
1159 .attrs = ivt_uncore_qpi_formats_attr,
1160};
1161
1162static struct intel_uncore_type ivt_uncore_ubox = {
1163 .name = "ubox",
1164 .num_counters = 2,
1165 .num_boxes = 1,
1166 .perf_ctr_bits = 44,
1167 .fixed_ctr_bits = 48,
1168 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1169 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1170 .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1171 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1172 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1173 .ops = &ivt_uncore_msr_ops,
1174 .format_group = &ivt_uncore_ubox_format_group,
1175};
1176
1177static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1178 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1179 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1180 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1181 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1182 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1183 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1184 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1185 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1186 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1187 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1188 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1189 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1190 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1191 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1192 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1193 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1194 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1195 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1196 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1197 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1198 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1199 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1200 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1201 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1202 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1203 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1204 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1205 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1206 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1207 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1208 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1209 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1210 EVENT_EXTRA_END
1211};
1212
1213static u64 ivt_cbox_filter_mask(int fields)
1214{
1215 u64 mask = 0;
1216
1217 if (fields & 0x1)
1218 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1219 if (fields & 0x2)
1220 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1221 if (fields & 0x4)
1222 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1223 if (fields & 0x8)
1224 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1225 if (fields & 0x10)
1226 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1227
1228 return mask;
1229}
1230
1231static struct event_constraint *
1232ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1233{
1234 return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1235}
1236
1237static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1238{
1239 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1240 struct extra_reg *er;
1241 int idx = 0;
1242
1243 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1244 if (er->event != (event->hw.config & er->config_mask))
1245 continue;
1246 idx |= er->idx;
1247 }
1248
1249 if (idx) {
1250 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1251 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1252 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1253 reg1->idx = idx;
1254 }
1255 return 0;
1256}
1257
1258static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1259{
1260 struct hw_perf_event *hwc = &event->hw;
1261 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1262
1263 if (reg1->idx != EXTRA_REG_NONE) {
1264 u64 filter = uncore_shared_reg_config(box, 0);
1265 wrmsrl(reg1->reg, filter & 0xffffffff);
1266 wrmsrl(reg1->reg + 6, filter >> 32);
1267 }
1268
1269 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1270}
1271
1272static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1273 .init_box = ivt_uncore_msr_init_box,
1274 .disable_box = snbep_uncore_msr_disable_box,
1275 .enable_box = snbep_uncore_msr_enable_box,
1276 .disable_event = snbep_uncore_msr_disable_event,
1277 .enable_event = ivt_cbox_enable_event,
1278 .read_counter = uncore_msr_read_counter,
1279 .hw_config = ivt_cbox_hw_config,
1280 .get_constraint = ivt_cbox_get_constraint,
1281 .put_constraint = snbep_cbox_put_constraint,
1282};
1283
1284static struct intel_uncore_type ivt_uncore_cbox = {
1285 .name = "cbox",
1286 .num_counters = 4,
1287 .num_boxes = 15,
1288 .perf_ctr_bits = 44,
1289 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1290 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1291 .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1292 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1293 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1294 .num_shared_regs = 1,
1295 .constraints = snbep_uncore_cbox_constraints,
1296 .ops = &ivt_uncore_cbox_ops,
1297 .format_group = &ivt_uncore_cbox_format_group,
1298};
1299
1300static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1301 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1302 .hw_config = snbep_pcu_hw_config,
1303 .get_constraint = snbep_pcu_get_constraint,
1304 .put_constraint = snbep_pcu_put_constraint,
1305};
1306
1307static struct intel_uncore_type ivt_uncore_pcu = {
1308 .name = "pcu",
1309 .num_counters = 4,
1310 .num_boxes = 1,
1311 .perf_ctr_bits = 48,
1312 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1313 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1314 .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1315 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1316 .num_shared_regs = 1,
1317 .ops = &ivt_uncore_pcu_ops,
1318 .format_group = &ivt_uncore_pcu_format_group,
1319};
1320
1321static struct intel_uncore_type *ivt_msr_uncores[] = {
1322 &ivt_uncore_ubox,
1323 &ivt_uncore_cbox,
1324 &ivt_uncore_pcu,
1325 NULL,
1326};
1327
1328static struct intel_uncore_type ivt_uncore_ha = {
1329 .name = "ha",
1330 .num_counters = 4,
1331 .num_boxes = 2,
1332 .perf_ctr_bits = 48,
1333 IVT_UNCORE_PCI_COMMON_INIT(),
1334};
1335
1336static struct intel_uncore_type ivt_uncore_imc = {
1337 .name = "imc",
1338 .num_counters = 4,
1339 .num_boxes = 8,
1340 .perf_ctr_bits = 48,
1341 .fixed_ctr_bits = 48,
1342 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1343 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1344 IVT_UNCORE_PCI_COMMON_INIT(),
1345};
1346
1347/* registers in IRP boxes are not properly aligned */
1348static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1349static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
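/*
 * Each array is indexed by the counter number (hwc->idx): counter 0 uses the
 * control register at PCI config offset 0xd8 and the counter value at 0xa0,
 * counter 1 uses 0xdc/0xb0, and so on, since the IRP registers do not follow
 * the regular perf_ctr/event_ctl + idx layout.
 */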
1350
1351static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1352{
1353 struct pci_dev *pdev = box->pci_dev;
1354 struct hw_perf_event *hwc = &event->hw;
1355
1356 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1357 hwc->config | SNBEP_PMON_CTL_EN);
1358}
1359
1360static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1361{
1362 struct pci_dev *pdev = box->pci_dev;
1363 struct hw_perf_event *hwc = &event->hw;
1364
1365 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1366}
1367
1368static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1369{
1370 struct pci_dev *pdev = box->pci_dev;
1371 struct hw_perf_event *hwc = &event->hw;
1372 u64 count = 0;
1373
1374 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1375 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1376
1377 return count;
1378}
1379
1380static struct intel_uncore_ops ivt_uncore_irp_ops = {
1381 .init_box = ivt_uncore_pci_init_box,
1382 .disable_box = snbep_uncore_pci_disable_box,
1383 .enable_box = snbep_uncore_pci_enable_box,
1384 .disable_event = ivt_uncore_irp_disable_event,
1385 .enable_event = ivt_uncore_irp_enable_event,
1386 .read_counter = ivt_uncore_irp_read_counter,
1387};
1388
1389static struct intel_uncore_type ivt_uncore_irp = {
1390 .name = "irp",
1391 .num_counters = 4,
1392 .num_boxes = 1,
1393 .perf_ctr_bits = 48,
1394 .event_mask = IVT_PMON_RAW_EVENT_MASK,
1395 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1396 .ops = &ivt_uncore_irp_ops,
1397 .format_group = &ivt_uncore_format_group,
1398};
1399
1400static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1401 .init_box = ivt_uncore_pci_init_box,
1402 .disable_box = snbep_uncore_pci_disable_box,
1403 .enable_box = snbep_uncore_pci_enable_box,
1404 .disable_event = snbep_uncore_pci_disable_event,
1405 .enable_event = snbep_qpi_enable_event,
1406 .read_counter = snbep_uncore_pci_read_counter,
1407 .hw_config = snbep_qpi_hw_config,
1408 .get_constraint = uncore_get_constraint,
1409 .put_constraint = uncore_put_constraint,
1410};
1411
1412static struct intel_uncore_type ivt_uncore_qpi = {
1413 .name = "qpi",
1414 .num_counters = 4,
1415 .num_boxes = 3,
1416 .perf_ctr_bits = 48,
1417 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1418 .event_ctl = SNBEP_PCI_PMON_CTL0,
1419 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1420 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1421 .num_shared_regs = 1,
1422 .ops = &ivt_uncore_qpi_ops,
1423 .format_group = &ivt_uncore_qpi_format_group,
1424};
1425
1426static struct intel_uncore_type ivt_uncore_r2pcie = {
1427 .name = "r2pcie",
1428 .num_counters = 4,
1429 .num_boxes = 1,
1430 .perf_ctr_bits = 44,
1431 .constraints = snbep_uncore_r2pcie_constraints,
1432 IVT_UNCORE_PCI_COMMON_INIT(),
1433};
1434
1435static struct intel_uncore_type ivt_uncore_r3qpi = {
1436 .name = "r3qpi",
1437 .num_counters = 3,
1438 .num_boxes = 2,
1439 .perf_ctr_bits = 44,
1440 .constraints = snbep_uncore_r3qpi_constraints,
1441 IVT_UNCORE_PCI_COMMON_INIT(),
1442};
1443
1444enum {
1445 IVT_PCI_UNCORE_HA,
1446 IVT_PCI_UNCORE_IMC,
1447 IVT_PCI_UNCORE_IRP,
1448 IVT_PCI_UNCORE_QPI,
1449 IVT_PCI_UNCORE_R2PCIE,
1450 IVT_PCI_UNCORE_R3QPI,
1451};
1452
1453static struct intel_uncore_type *ivt_pci_uncores[] = {
1454 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1455 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1456 [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp,
1457 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1458 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1459 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
1460 NULL,
1461};
1462
1463static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1464 { /* Home Agent 0 */
1465 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1466 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1467 },
1468 { /* Home Agent 1 */
1469 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1470 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1471 },
1472 { /* MC0 Channel 0 */
1473 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1474 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1475 },
1476 { /* MC0 Channel 1 */
1477 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1478 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1479 },
1480 { /* MC0 Channel 3 */
1481 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1482 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1483 },
1484 { /* MC0 Channel 4 */
1485 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1486 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1487 },
1488 { /* MC1 Channel 0 */
1489 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1490 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1491 },
1492 { /* MC1 Channel 1 */
1493 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1494 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1495 },
1496 { /* MC1 Channel 3 */
1497 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1498 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1499 },
1500 { /* MC1 Channel 4 */
1501 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1502 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1503 },
1504 { /* IRP */
1505 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1506 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1507 },
1508 { /* QPI0 Port 0 */
1509 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1510 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1511 },
1512 { /* QPI0 Port 1 */
1513 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1514 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1515 },
1516 { /* QPI1 Port 2 */
1517 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1518 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1519 },
1520 { /* R2PCIe */
1521 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1522 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1523 },
1524 { /* R3QPI0 Link 0 */
1525 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1526 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1527 },
1528 { /* R3QPI0 Link 1 */
1529 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1530 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1531 },
1532 { /* R3QPI1 Link 2 */
1533 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1534 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1535 },
1536 { /* QPI Port 0 filter */
1537 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1538 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1539 SNBEP_PCI_QPI_PORT0_FILTER),
1540 },
1541 { /* QPI Port 0 filter */
1542 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1543 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1544 SNBEP_PCI_QPI_PORT1_FILTER),
1545 },
1546 { /* end: all zeroes */ }
1547};
1548
1549static struct pci_driver ivt_uncore_pci_driver = {
1550 .name = "ivt_uncore",
1551 .id_table = ivt_uncore_pci_ids,
1552};
1553/* end of IvyTown uncore support */
1554
1555/* Sandy Bridge uncore support */
1556static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1557{
1558 struct hw_perf_event *hwc = &event->hw;
1559
1560 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1561 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1562 else
1563 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1564}
1565
1566static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1567{
1568 wrmsrl(event->hw.config_base, 0);
1569}
1570
1571static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1572{
1573 if (box->pmu->pmu_idx == 0) {
1574 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1575 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1576 }
1577}
1578
1579static struct uncore_event_desc snb_uncore_events[] = {
1580 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1581 { /* end: all zeroes */ },
1582};
1583
1584static struct attribute *snb_uncore_formats_attr[] = {
1585 &format_attr_event.attr,
1586 &format_attr_umask.attr,
1587 &format_attr_edge.attr,
1588 &format_attr_inv.attr,
1589 &format_attr_cmask5.attr,
1590 NULL,
1591};
1592
1593static struct attribute_group snb_uncore_format_group = {
1594 .name = "format",
1595 .attrs = snb_uncore_formats_attr,
1596};
1597
1598static struct intel_uncore_ops snb_uncore_msr_ops = {
1599 .init_box = snb_uncore_msr_init_box,
1600 .disable_event = snb_uncore_msr_disable_event,
1601 .enable_event = snb_uncore_msr_enable_event,
1602 .read_counter = uncore_msr_read_counter,
1603};
1604
1605static struct event_constraint snb_uncore_cbox_constraints[] = {
1606 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1607 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1608 EVENT_CONSTRAINT_END
1609};
1610
1611static struct intel_uncore_type snb_uncore_cbox = {
1612 .name = "cbox",
1613 .num_counters = 2,
1614 .num_boxes = 4,
1615 .perf_ctr_bits = 44,
1616 .fixed_ctr_bits = 48,
1617 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
1618 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
1619 .fixed_ctr = SNB_UNC_FIXED_CTR,
1620 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
1621 .single_fixed = 1,
1622 .event_mask = SNB_UNC_RAW_EVENT_MASK,
1623 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
1624 .constraints = snb_uncore_cbox_constraints,
1625 .ops = &snb_uncore_msr_ops,
1626 .format_group = &snb_uncore_format_group,
1627 .event_descs = snb_uncore_events,
1628};
1629
1630static struct intel_uncore_type *snb_msr_uncores[] = {
1631 &snb_uncore_cbox,
1632 NULL,
1633};
1634/* end of Sandy Bridge uncore support */
1635
1636/* Nehalem uncore support */
1637static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1638{
1639 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
1640}
1641
1642static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1643{
1644 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1645}
1646
1647static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1648{
1649 struct hw_perf_event *hwc = &event->hw;
1650
1651 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1652 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1653 else
1654 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1655}
1656
1657static struct attribute *nhm_uncore_formats_attr[] = {
1658 &format_attr_event.attr,
1659 &format_attr_umask.attr,
1660 &format_attr_edge.attr,
1661 &format_attr_inv.attr,
1662 &format_attr_cmask8.attr,
1663 NULL,
1664};
1665
1666static struct attribute_group nhm_uncore_format_group = {
1667 .name = "format",
1668 .attrs = nhm_uncore_formats_attr,
1669};
1670
1671static struct uncore_event_desc nhm_uncore_events[] = {
1672 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1673 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
1674 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
1675 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
1676 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
1677 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
1678 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1679 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
1680 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
1681 { /* end: all zeroes */ },
1682};
1683
1684static struct intel_uncore_ops nhm_uncore_msr_ops = {
1685 .disable_box = nhm_uncore_msr_disable_box,
1686 .enable_box = nhm_uncore_msr_enable_box,
1687 .disable_event = snb_uncore_msr_disable_event,
1688 .enable_event = nhm_uncore_msr_enable_event,
1689 .read_counter = uncore_msr_read_counter,
1690};
1691
1692static struct intel_uncore_type nhm_uncore = {
1693 .name = "",
1694 .num_counters = 8,
1695 .num_boxes = 1,
1696 .perf_ctr_bits = 48,
1697 .fixed_ctr_bits = 48,
1698 .event_ctl = NHM_UNC_PERFEVTSEL0,
1699 .perf_ctr = NHM_UNC_UNCORE_PMC0,
1700 .fixed_ctr = NHM_UNC_FIXED_CTR,
1701 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
1702 .event_mask = NHM_UNC_RAW_EVENT_MASK,
1703 .event_descs = nhm_uncore_events,
1704 .ops = &nhm_uncore_msr_ops,
1705 .format_group = &nhm_uncore_format_group,
1706};
1707
1708static struct intel_uncore_type *nhm_msr_uncores[] = {
1709 &nhm_uncore,
1710 NULL,
1711};
1712/* end of Nehalem uncore support */
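/*
 * Note: nhm_uncore.name is the empty string, so uncore_pmu_register()
 * below registers this PMU as plain "uncore".  Assuming that name, a
 * user-space usage sketch would be e.g.
 *
 *     perf stat -a -e 'uncore/qmc_writes_full_any/' sleep 1
 *
 * which resolves to event=0x2f,umask=0x0f via nhm_uncore_events[].
 */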
1713
1714/* Nehalem-EX uncore support */
254298c7
YZ
1715DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1716DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
254298c7
YZ
1717DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
1718DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
1719
1720static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
1721{
1722 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
1723}
1724
1725static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
1726{
1727 unsigned msr = uncore_msr_box_ctl(box);
1728 u64 config;
1729
1730 if (msr) {
1731 rdmsrl(msr, config);
1732 config &= ~((1ULL << uncore_num_counters(box)) - 1);
1733 /* WBox has a fixed counter */
1734 if (uncore_msr_fixed_ctl(box))
1735 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
1736 wrmsrl(msr, config);
1737 }
1738}
1739
1740static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1741{
1742 unsigned msr = uncore_msr_box_ctl(box);
1743 u64 config;
1744
1745 if (msr) {
1746 rdmsrl(msr, config);
1747 config |= (1ULL << uncore_num_counters(box)) - 1;
1748 /* WBox has a fixed counter */
1749 if (uncore_msr_fixed_ctl(box))
1750 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1751 wrmsrl(msr, config);
1752 }
1753}
1754
1755static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1756{
1757 wrmsrl(event->hw.config_base, 0);
1758}
1759
1760static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1761{
1762 struct hw_perf_event *hwc = &event->hw;
1763
1764 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1765 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1766 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1767 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1768 else
1769 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1770}
1771
1772#define NHMEX_UNCORE_OPS_COMMON_INIT() \
1773 .init_box = nhmex_uncore_msr_init_box, \
1774 .disable_box = nhmex_uncore_msr_disable_box, \
1775 .enable_box = nhmex_uncore_msr_enable_box, \
1776 .disable_event = nhmex_uncore_msr_disable_event, \
1777 .read_counter = uncore_msr_read_counter
1778
1779static struct intel_uncore_ops nhmex_uncore_ops = {
1780 NHMEX_UNCORE_OPS_COMMON_INIT(),
1781 .enable_event = nhmex_uncore_msr_enable_event,
1782};
1783
1784static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1785 &format_attr_event.attr,
1786 &format_attr_edge.attr,
1787 NULL,
1788};
1789
1790static struct attribute_group nhmex_uncore_ubox_format_group = {
1791 .name = "format",
1792 .attrs = nhmex_uncore_ubox_formats_attr,
1793};
1794
1795static struct intel_uncore_type nhmex_uncore_ubox = {
1796 .name = "ubox",
1797 .num_counters = 1,
1798 .num_boxes = 1,
1799 .perf_ctr_bits = 48,
1800 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
1801 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
1802 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
1803 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1804 .ops = &nhmex_uncore_ops,
1805 .format_group = &nhmex_uncore_ubox_format_group
1806};
1807
1808static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1809 &format_attr_event.attr,
1810 &format_attr_umask.attr,
1811 &format_attr_edge.attr,
1812 &format_attr_inv.attr,
1813 &format_attr_thresh8.attr,
1814 NULL,
1815};
1816
1817static struct attribute_group nhmex_uncore_cbox_format_group = {
1818 .name = "format",
1819 .attrs = nhmex_uncore_cbox_formats_attr,
1820};
1821
cb37af77
YZ
1822/* msr offset for each instance of cbox */
1823static unsigned nhmex_cbox_msr_offsets[] = {
1824 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1825};
1826
254298c7
YZ
1827static struct intel_uncore_type nhmex_uncore_cbox = {
1828 .name = "cbox",
1829 .num_counters = 6,
cb37af77 1830 .num_boxes = 10,
254298c7
YZ
1831 .perf_ctr_bits = 48,
1832 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
1833 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
1834 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1835 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
cb37af77 1836 .msr_offsets = nhmex_cbox_msr_offsets,
254298c7
YZ
1837 .pair_ctr_ctl = 1,
1838 .ops = &nhmex_uncore_ops,
1839 .format_group = &nhmex_uncore_cbox_format_group
1840};
1841
1842static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1843 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1844 { /* end: all zeroes */ },
1845};
1846
1847static struct intel_uncore_type nhmex_uncore_wbox = {
1848 .name = "wbox",
1849 .num_counters = 4,
1850 .num_boxes = 1,
1851 .perf_ctr_bits = 48,
1852 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
1853 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
1854 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
1855 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
1856 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1857 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
1858 .pair_ctr_ctl = 1,
1859 .event_descs = nhmex_uncore_wbox_events,
1860 .ops = &nhmex_uncore_ops,
1861 .format_group = &nhmex_uncore_cbox_format_group
1862};
1863
1864static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1865{
1866 struct hw_perf_event *hwc = &event->hw;
1867 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1868 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1869 int ctr, ev_sel;
1870
1871 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1872 NHMEX_B_PMON_CTR_SHIFT;
1873 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1874 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1875
1876 /* events that do not use the match/mask registers */
1877 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1878 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1879 return 0;
1880
1881 if (box->pmu->pmu_idx == 0)
1882 reg1->reg = NHMEX_B0_MSR_MATCH;
1883 else
1884 reg1->reg = NHMEX_B1_MSR_MATCH;
1885 reg1->idx = 0;
1886 reg1->config = event->attr.config1;
1887 reg2->config = event->attr.config2;
1888 return 0;
1889}
1890
1891static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1892{
1893 struct hw_perf_event *hwc = &event->hw;
1894 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1895 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1896
1897 if (reg1->idx != EXTRA_REG_NONE) {
1898 wrmsrl(reg1->reg, reg1->config);
1899 wrmsrl(reg1->reg + 1, reg2->config);
1900 }
1901 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1902 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1903}
1904
1905/*
1906 * The Bbox has 4 counters, but each counter monitors different events.
1907 * Use bits 6-7 in the event config to select the counter.
1908 */
1909static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1910 EVENT_CONSTRAINT(0, 1, 0xc0),
1911 EVENT_CONSTRAINT(0x40, 2, 0xc0),
1912 EVENT_CONSTRAINT(0x80, 4, 0xc0),
1913 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1914 EVENT_CONSTRAINT_END,
1915};
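/*
 * Reading the table above: EVENT_CONSTRAINT(code, idxmsk, cmask) matches
 * events whose (config & cmask) == code (see uncore_get_event_constraint()
 * below) and restricts them to the counters in the idxmsk bitmask.
 * E.g. EVENT_CONSTRAINT(0x40, 2, 0xc0) ties events whose counter-select
 * bits 6-7 are 01b to counter 1 only (idxmsk 0x2).
 */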
1916
1917static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1918 &format_attr_event5.attr,
1919 &format_attr_counter.attr,
1920 &format_attr_match.attr,
1921 &format_attr_mask.attr,
1922 NULL,
1923};
1924
1925static struct attribute_group nhmex_uncore_bbox_format_group = {
1926 .name = "format",
1927 .attrs = nhmex_uncore_bbox_formats_attr,
1928};
1929
1930static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1931 NHMEX_UNCORE_OPS_COMMON_INIT(),
1932 .enable_event = nhmex_bbox_msr_enable_event,
1933 .hw_config = nhmex_bbox_hw_config,
1934 .get_constraint = uncore_get_constraint,
1935 .put_constraint = uncore_put_constraint,
1936};
1937
1938static struct intel_uncore_type nhmex_uncore_bbox = {
1939 .name = "bbox",
1940 .num_counters = 4,
1941 .num_boxes = 2,
1942 .perf_ctr_bits = 48,
1943 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1944 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1945 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1946 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1947 .msr_offset = NHMEX_B_MSR_OFFSET,
1948 .pair_ctr_ctl = 1,
1949 .num_shared_regs = 1,
1950 .constraints = nhmex_uncore_bbox_constraints,
1951 .ops = &nhmex_uncore_bbox_ops,
1952 .format_group = &nhmex_uncore_bbox_format_group
1953};
1954
1955static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1956{
ebb6cc03
YZ
1957 struct hw_perf_event *hwc = &event->hw;
1958 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1959 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
254298c7 1960
ebb6cc03
YZ
1961 /* only TO_R_PROG_EV event uses the match/mask register */
1962 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1963 NHMEX_S_EVENT_TO_R_PROG_EV)
1964 return 0;
254298c7
YZ
1965
1966 if (box->pmu->pmu_idx == 0)
1967 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1968 else
1969 reg1->reg = NHMEX_S1_MSR_MM_CFG;
254298c7 1970 reg1->idx = 0;
ebb6cc03
YZ
1971 reg1->config = event->attr.config1;
1972 reg2->config = event->attr.config2;
254298c7
YZ
1973 return 0;
1974}
1975
1976static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1977{
1978 struct hw_perf_event *hwc = &event->hw;
1979 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1980 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1981
ebb6cc03
YZ
1982 if (reg1->idx != EXTRA_REG_NONE) {
1983 wrmsrl(reg1->reg, 0);
254298c7
YZ
1984 wrmsrl(reg1->reg + 1, reg1->config);
1985 wrmsrl(reg1->reg + 2, reg2->config);
1986 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1987 }
1988 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1989}
1990
1991static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1992 &format_attr_event.attr,
1993 &format_attr_umask.attr,
1994 &format_attr_edge.attr,
1995 &format_attr_inv.attr,
1996 &format_attr_thresh8.attr,
254298c7
YZ
1997 &format_attr_match.attr,
1998 &format_attr_mask.attr,
1999 NULL,
2000};
2001
2002static struct attribute_group nhmex_uncore_sbox_format_group = {
2003 .name = "format",
2004 .attrs = nhmex_uncore_sbox_formats_attr,
2005};
2006
2007static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2008 NHMEX_UNCORE_OPS_COMMON_INIT(),
2009 .enable_event = nhmex_sbox_msr_enable_event,
2010 .hw_config = nhmex_sbox_hw_config,
2011 .get_constraint = uncore_get_constraint,
2012 .put_constraint = uncore_put_constraint,
2013};
2014
2015static struct intel_uncore_type nhmex_uncore_sbox = {
2016 .name = "sbox",
2017 .num_counters = 4,
2018 .num_boxes = 2,
2019 .perf_ctr_bits = 48,
2020 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
2021 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
2022 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2023 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2024 .msr_offset = NHMEX_S_MSR_OFFSET,
2025 .pair_ctr_ctl = 1,
2026 .num_shared_regs = 1,
2027 .ops = &nhmex_uncore_sbox_ops,
2028 .format_group = &nhmex_uncore_sbox_format_group
2029};
2030
2031enum {
2032 EXTRA_REG_NHMEX_M_FILTER,
2033 EXTRA_REG_NHMEX_M_DSP,
2034 EXTRA_REG_NHMEX_M_ISS,
2035 EXTRA_REG_NHMEX_M_MAP,
2036 EXTRA_REG_NHMEX_M_MSC_THR,
2037 EXTRA_REG_NHMEX_M_PGT,
2038 EXTRA_REG_NHMEX_M_PLD,
2039 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2040};
2041
2042static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2043 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2044 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2045 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2046 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2047 /* event 0xa uses two extra registers */
2048 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2049 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2050 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2051 /* events 0xd ~ 0x10 use the same extra register */
2052 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2053 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2054 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2055 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2056 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2057 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2058 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2059 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2060 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2061 EVENT_EXTRA_END
2062};
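/*
 * The table above maps an mbox event's inc_sel (and set_flag_sel) value
 * to the auxiliary register it needs: e.g. inc_sel 0xd ~ 0x10 all route
 * to the shared ZDP_CTL_FVC register, while inc_sel 0xa needs both the
 * ISS and PLD registers.  nhmex_mbox_hw_config() below walks this table
 * to fill reg1/reg2 for the event.
 */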
2063
cb37af77 2064/* Nehalem-EX or Westmere-EX ? */
46bdd905 2065static bool uncore_nhmex;
cb37af77 2066
254298c7
YZ
2067static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2068{
2069 struct intel_uncore_extra_reg *er;
2070 unsigned long flags;
2071 bool ret = false;
2072 u64 mask;
2073
2074 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2075 er = &box->shared_regs[idx];
2076 raw_spin_lock_irqsave(&er->lock, flags);
2077 if (!atomic_read(&er->ref) || er->config == config) {
2078 atomic_inc(&er->ref);
2079 er->config = config;
2080 ret = true;
2081 }
2082 raw_spin_unlock_irqrestore(&er->lock, flags);
2083
2084 return ret;
2085 }
2086 /*
2087 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2088 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2089 * fields which are shared.
2090 */
2091 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2092 if (WARN_ON_ONCE(idx >= 4))
2093 return false;
2094
2095 /* mask of the shared fields */
cb37af77
YZ
2096 if (uncore_nhmex)
2097 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2098 else
2099 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
254298c7
YZ
2100 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2101
2102 raw_spin_lock_irqsave(&er->lock, flags);
2103 /* add mask of the non-shared field if it's in use */
cb37af77
YZ
2104 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2105 if (uncore_nhmex)
2106 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2107 else
2108 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2109 }
254298c7
YZ
2110
2111 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2112 atomic_add(1 << (idx * 8), &er->ref);
cb37af77
YZ
2113 if (uncore_nhmex)
2114 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2115 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2116 else
2117 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2118 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
254298c7
YZ
2119 er->config &= ~mask;
2120 er->config |= (config & mask);
2121 ret = true;
2122 }
2123 raw_spin_unlock_irqrestore(&er->lock, flags);
2124
2125 return ret;
2126}
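/*
 * Note on the refcounting above: for the ZDP_CTL_FVC case the single
 * atomic_t er->ref is treated as four packed 8-bit counters, one per FVC
 * event field -- atomic_add(1 << (idx * 8), ...) bumps field idx and
 * __BITS_VALUE(atomic_read(...), idx, 8) reads it back.
 */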
2127
2128static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2129{
2130 struct intel_uncore_extra_reg *er;
2131
2132 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2133 er = &box->shared_regs[idx];
2134 atomic_dec(&er->ref);
2135 return;
2136 }
2137
2138 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2139 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2140 atomic_sub(1 << (idx * 8), &er->ref);
2141}
2142
46bdd905 2143static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
254298c7
YZ
2144{
2145 struct hw_perf_event *hwc = &event->hw;
2146 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
13acac30 2147 u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
254298c7
YZ
2148 u64 config = reg1->config;
2149
2150 /* get the non-shared control bits and shift them */
2151 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
cb37af77
YZ
2152 if (uncore_nhmex)
2153 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2154 else
2155 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
254298c7
YZ
2156 if (new_idx > orig_idx) {
2157 idx = new_idx - orig_idx;
2158 config <<= 3 * idx;
2159 } else {
2160 idx = orig_idx - new_idx;
2161 config >>= 3 * idx;
2162 }
2163
2164 /* add the shared control bits back */
cb37af77
YZ
2165 if (uncore_nhmex)
2166 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2167 else
2168 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
254298c7
YZ
2169 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2170 if (modify) {
2171 /* adjust the main event selector */
2172 if (new_idx > orig_idx)
2173 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2174 else
2175 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2176 reg1->config = config;
2177 reg1->idx = ~0xff | new_idx;
2178 }
2179 return config;
 2180}
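/*
 * The "<< 3 * idx" / ">> 3 * idx" above relies on the per-event FVC
 * fields being spaced 3 bits apart: moving an event to an alternative
 * slot just shifts its non-shared bits into the new field and adjusts
 * inc_sel by the same distance.
 */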
2181
2182static struct event_constraint *
2183nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2184{
2185 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2186 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2187 int i, idx[2], alloc = 0;
2188 u64 config1 = reg1->config;
2189
2190 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2191 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2192again:
2193 for (i = 0; i < 2; i++) {
2194 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2195 idx[i] = 0xff;
2196
2197 if (idx[i] == 0xff)
2198 continue;
2199
2200 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2201 __BITS_VALUE(config1, i, 32)))
2202 goto fail;
2203 alloc |= (0x1 << i);
2204 }
2205
2206 /* for the match/mask registers */
ebb6cc03
YZ
2207 if (reg2->idx != EXTRA_REG_NONE &&
2208 (uncore_box_is_fake(box) || !reg2->alloc) &&
254298c7
YZ
2209 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2210 goto fail;
2211
2212 /*
2213 * If it's a fake box -- as per validate_{group,event}() -- we
2214 * shouldn't touch the event state. We can avoid doing so
2215 * since both will only call get_event_constraints() once
2216 * on each event, which avoids the need for reg->alloc.
2217 */
2218 if (!uncore_box_is_fake(box)) {
2219 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2220 nhmex_mbox_alter_er(event, idx[0], true);
2221 reg1->alloc |= alloc;
ebb6cc03
YZ
2222 if (reg2->idx != EXTRA_REG_NONE)
2223 reg2->alloc = 1;
254298c7
YZ
2224 }
2225 return NULL;
2226fail:
2227 if (idx[0] != 0xff && !(alloc & 0x1) &&
2228 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2229 /*
2230 * events 0xd ~ 0x10 are functionally identical, but are
2231 * controlled by different fields in the ZDP_CTL_FVC
2232 * register. If we fail to take one field, try the
2233 * remaining 3 choices.
7c94ee2e 2234 */
254298c7
YZ
2235 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2236 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2237 idx[0] = (idx[0] + 1) % 4;
2238 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2239 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2240 config1 = nhmex_mbox_alter_er(event, idx[0], false);
2241 goto again;
7c94ee2e 2242 }
254298c7 2243 }
7c94ee2e 2244
254298c7
YZ
2245 if (alloc & 0x1)
2246 nhmex_mbox_put_shared_reg(box, idx[0]);
2247 if (alloc & 0x2)
2248 nhmex_mbox_put_shared_reg(box, idx[1]);
2249 return &constraint_empty;
2250}
fcde10e9 2251
254298c7 2252static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2253{
254298c7
YZ
2254 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2255 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
fcde10e9 2256
254298c7
YZ
2257 if (uncore_box_is_fake(box))
2258 return;
2259
2260 if (reg1->alloc & 0x1)
2261 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2262 if (reg1->alloc & 0x2)
2263 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2264 reg1->alloc = 0;
2265
2266 if (reg2->alloc) {
2267 nhmex_mbox_put_shared_reg(box, reg2->idx);
2268 reg2->alloc = 0;
2269 }
fcde10e9
YZ
2270}
2271
254298c7 2272static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
fcde10e9 2273{
254298c7
YZ
2274 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2275 return er->idx;
2276 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
fcde10e9
YZ
2277}
2278
254298c7 2279static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2280{
254298c7
YZ
2281 struct intel_uncore_type *type = box->pmu->type;
2282 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2283 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2284 struct extra_reg *er;
2285 unsigned msr;
2286 int reg_idx = 0;
254298c7
YZ
2287 /*
2288 * The mbox events may require 2 extra MSRs at most. But only
2289 * the lower 32 bits in these MSRs are significant, so we can use
2290 * config1 to pass two MSRs' config.
2291 */
2292 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2293 if (er->event != (event->hw.config & er->config_mask))
2294 continue;
2295 if (event->attr.config1 & ~er->valid_mask)
2296 return -EINVAL;
254298c7
YZ
2297
2298 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2299 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2300 return -EINVAL;
2301
2302 /* always use bits 32~63 to pass the PLD config */
2303 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2304 reg_idx = 1;
ebb6cc03
YZ
2305 else if (WARN_ON_ONCE(reg_idx > 0))
2306 return -EINVAL;
254298c7
YZ
2307
2308 reg1->idx &= ~(0xff << (reg_idx * 8));
2309 reg1->reg &= ~(0xffff << (reg_idx * 16));
2310 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2311 reg1->reg |= msr << (reg_idx * 16);
2312 reg1->config = event->attr.config1;
2313 reg_idx++;
2314 }
ebb6cc03
YZ
2315 /*
2316 * The mbox only provides the ability to perform address matching
2317 * for the PLD events.
2318 */
2319 if (reg_idx == 2) {
2320 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2321 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2322 reg2->config = event->attr.config2;
2323 else
2324 reg2->config = ~0ULL;
2325 if (box->pmu->pmu_idx == 0)
2326 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2327 else
2328 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2329 }
254298c7 2330 return 0;
fcde10e9
YZ
2331}
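/*
 * Packing used above: reg1->idx holds up to two 8-bit extra-register
 * indices (one per byte) and reg1->reg holds the two corresponding
 * 16-bit MSR addresses (one per half-word); nhmex_mbox_msr_enable_event()
 * unpacks them again with __BITS_VALUE().
 */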
2332
254298c7 2333static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
fcde10e9 2334{
254298c7
YZ
2335 struct intel_uncore_extra_reg *er;
2336 unsigned long flags;
2337 u64 config;
2338
2339 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2340 return box->shared_regs[idx].config;
2341
2342 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2343 raw_spin_lock_irqsave(&er->lock, flags);
2344 config = er->config;
2345 raw_spin_unlock_irqrestore(&er->lock, flags);
2346 return config;
2347}
2348
2349static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2350{
2351 struct hw_perf_event *hwc = &event->hw;
2352 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2353 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2354 int idx;
2355
2356 idx = __BITS_VALUE(reg1->idx, 0, 8);
2357 if (idx != 0xff)
2358 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2359 nhmex_mbox_shared_reg_config(box, idx));
2360 idx = __BITS_VALUE(reg1->idx, 1, 8);
2361 if (idx != 0xff)
2362 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2363 nhmex_mbox_shared_reg_config(box, idx));
2364
ebb6cc03
YZ
2365 if (reg2->idx != EXTRA_REG_NONE) {
2366 wrmsrl(reg2->reg, 0);
2367 if (reg2->config != ~0ULL) {
2368 wrmsrl(reg2->reg + 1,
2369 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2370 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2371 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2372 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2373 }
fcde10e9 2374 }
254298c7
YZ
2375
2376 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
fcde10e9
YZ
2377}
2378
ebb6cc03
YZ
2379DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
2380DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
2381DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
2382DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
2383DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
2384DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
2385DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
2386DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
2387DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
2388DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
2389DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
2390DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
2391DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
2392DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
2393DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
2394DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
254298c7
YZ
2395
2396static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2397 &format_attr_count_mode.attr,
2398 &format_attr_storage_mode.attr,
2399 &format_attr_wrap_mode.attr,
2400 &format_attr_flag_mode.attr,
2401 &format_attr_inc_sel.attr,
2402 &format_attr_set_flag_sel.attr,
ebb6cc03 2403 &format_attr_filter_cfg_en.attr,
254298c7
YZ
2404 &format_attr_filter_match.attr,
2405 &format_attr_filter_mask.attr,
2406 &format_attr_dsp.attr,
2407 &format_attr_thr.attr,
2408 &format_attr_fvc.attr,
2409 &format_attr_pgt.attr,
2410 &format_attr_map.attr,
2411 &format_attr_iss.attr,
2412 &format_attr_pld.attr,
fcde10e9
YZ
2413 NULL,
2414};
2415
254298c7
YZ
2416static struct attribute_group nhmex_uncore_mbox_format_group = {
2417 .name = "format",
2418 .attrs = nhmex_uncore_mbox_formats_attr,
fcde10e9
YZ
2419};
2420
254298c7
YZ
2421static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2422 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2423 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2424 { /* end: all zeroes */ },
fcde10e9
YZ
2425};
2426
cb37af77
YZ
2427static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2428 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2429 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2430 { /* end: all zeroes */ },
2431};
2432
254298c7
YZ
2433static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2434 NHMEX_UNCORE_OPS_COMMON_INIT(),
2435 .enable_event = nhmex_mbox_msr_enable_event,
2436 .hw_config = nhmex_mbox_hw_config,
2437 .get_constraint = nhmex_mbox_get_constraint,
2438 .put_constraint = nhmex_mbox_put_constraint,
fcde10e9
YZ
2439};
2440
254298c7
YZ
2441static struct intel_uncore_type nhmex_uncore_mbox = {
2442 .name = "mbox",
2443 .num_counters = 6,
2444 .num_boxes = 2,
2445 .perf_ctr_bits = 48,
2446 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
2447 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
2448 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
2449 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
2450 .msr_offset = NHMEX_M_MSR_OFFSET,
2451 .pair_ctr_ctl = 1,
2452 .num_shared_regs = 8,
2453 .event_descs = nhmex_uncore_mbox_events,
2454 .ops = &nhmex_uncore_mbox_ops,
2455 .format_group = &nhmex_uncore_mbox_format_group,
fcde10e9
YZ
2456};
2457
46bdd905 2458static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
254298c7
YZ
2459{
2460 struct hw_perf_event *hwc = &event->hw;
2461 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
fcde10e9 2462
ebb6cc03 2463 /* adjust the main event selector and extra register index */
254298c7
YZ
2464 if (reg1->idx % 2) {
2465 reg1->idx--;
2466 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2467 } else {
2468 reg1->idx++;
2469 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2470 }
2471
ebb6cc03 2472 /* adjust extra register config */
254298c7 2473 switch (reg1->idx % 6) {
254298c7 2474 case 2:
ebb6cc03 2475 /* shift the 8~15 bits to the 0~7 bits */
254298c7
YZ
2476 reg1->config >>= 8;
2477 break;
2478 case 3:
ebb6cc03 2479 /* shift the 0~7 bits to the 8~15 bits */
254298c7
YZ
2480 reg1->config <<= 8;
2481 break;
254298c7
YZ
2482 };
2483}
2484
2485/*
2486 * Each rbox has 4 event sets which monitor QPI ports 0~3 or 4~7.
2487 * An event set consists of 6 events; the 3rd and 4th events in
2488 * an event set use the same extra register, so an event set uses
2489 * 5 extra registers.
2490 */
2491static struct event_constraint *
2492nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2493{
254298c7
YZ
2494 struct hw_perf_event *hwc = &event->hw;
2495 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2496 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2497 struct intel_uncore_extra_reg *er;
2498 unsigned long flags;
2499 int idx, er_idx;
2500 u64 config1;
2501 bool ok = false;
2502
2503 if (!uncore_box_is_fake(box) && reg1->alloc)
2504 return NULL;
2505
2506 idx = reg1->idx % 6;
2507 config1 = reg1->config;
2508again:
2509 er_idx = idx;
2510 /* the 3rd and 4th events use the same extra register */
2511 if (er_idx > 2)
2512 er_idx--;
2513 er_idx += (reg1->idx / 6) * 5;
2514
2515 er = &box->shared_regs[er_idx];
2516 raw_spin_lock_irqsave(&er->lock, flags);
2517 if (idx < 2) {
2518 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2519 atomic_inc(&er->ref);
2520 er->config = reg1->config;
2521 ok = true;
2522 }
2523 } else if (idx == 2 || idx == 3) {
2524 /*
2525 * these two events use different fields in an extra register,
2526 * bits 0~7 and bits 8~15 respectively.
2527 */
2528 u64 mask = 0xff << ((idx - 2) * 8);
2529 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2530 !((er->config ^ config1) & mask)) {
2531 atomic_add(1 << ((idx - 2) * 8), &er->ref);
2532 er->config &= ~mask;
2533 er->config |= config1 & mask;
2534 ok = true;
2535 }
2536 } else {
2537 if (!atomic_read(&er->ref) ||
2538 (er->config == (hwc->config >> 32) &&
2539 er->config1 == reg1->config &&
2540 er->config2 == reg2->config)) {
2541 atomic_inc(&er->ref);
2542 er->config = (hwc->config >> 32);
2543 er->config1 = reg1->config;
2544 er->config2 = reg2->config;
2545 ok = true;
2546 }
2547 }
2548 raw_spin_unlock_irqrestore(&er->lock, flags);
2549
2550 if (!ok) {
2551 /*
2552 * The Rbox events are always in pairs. The paired
2553 * events are functionally identical, but use different
2554 * extra registers. If we fail to take an extra
2555 * register, try the alternative.
2556 */
2557 if (idx % 2)
2558 idx--;
2559 else
2560 idx++;
2561 if (idx != reg1->idx % 6) {
2562 if (idx == 2)
2563 config1 >>= 8;
2564 else if (idx == 3)
2565 config1 <<= 8;
2566 goto again;
2567 }
2568 } else {
2569 if (!uncore_box_is_fake(box)) {
2570 if (idx != reg1->idx % 6)
2571 nhmex_rbox_alter_er(box, event);
2572 reg1->alloc = 1;
2573 }
2574 return NULL;
2575 }
2576 return &constraint_empty;
fcde10e9
YZ
2577}
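/*
 * Worked example of the er_idx arithmetic above: the 4th event of the
 * second set has reg1->idx = 9, so idx = 9 % 6 = 3, er_idx = 3 - 1 = 2,
 * plus (9 / 6) * 5 = 5, giving shared register 7 -- the same register the
 * 3rd event of that set (reg1->idx = 8) maps to.
 */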
2578
254298c7 2579static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2580{
254298c7
YZ
2581 struct intel_uncore_extra_reg *er;
2582 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2583 int idx, er_idx;
2584
2585 if (uncore_box_is_fake(box) || !reg1->alloc)
2586 return;
2587
2588 idx = reg1->idx % 6;
2589 er_idx = idx;
2590 if (er_idx > 2)
2591 er_idx--;
2592 er_idx += (reg1->idx / 6) * 5;
2593
2594 er = &box->shared_regs[er_idx];
2595 if (idx == 2 || idx == 3)
2596 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2597 else
2598 atomic_dec(&er->ref);
2599
2600 reg1->alloc = 0;
fcde10e9
YZ
2601}
2602
254298c7 2603static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9
YZ
2604{
2605 struct hw_perf_event *hwc = &event->hw;
254298c7
YZ
2606 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2607 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
ebb6cc03 2608 int idx;
fcde10e9 2609
254298c7
YZ
2610 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2611 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2612 if (idx >= 0x18)
2613 return -EINVAL;
2614
2615 reg1->idx = idx;
2616 reg1->config = event->attr.config1;
2617
ebb6cc03 2618 switch (idx % 6) {
254298c7
YZ
2619 case 4:
2620 case 5:
254298c7 2621 hwc->config |= event->attr.config & (~0ULL << 32);
ebb6cc03 2622 reg2->config = event->attr.config2;
254298c7
YZ
2623 break;
2624 };
2625 return 0;
fcde10e9
YZ
2626}
2627
254298c7
YZ
2628static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2629{
2630 struct hw_perf_event *hwc = &event->hw;
2631 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2632 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
ebb6cc03 2633 int idx, port;
254298c7 2634
ebb6cc03
YZ
2635 idx = reg1->idx;
2636 port = idx / 6 + box->pmu->pmu_idx * 4;
254298c7 2637
ebb6cc03 2638 switch (idx % 6) {
254298c7 2639 case 0:
ebb6cc03
YZ
2640 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
2641 break;
254298c7 2642 case 1:
ebb6cc03 2643 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
254298c7
YZ
2644 break;
2645 case 2:
2646 case 3:
ebb6cc03 2647 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
46bdd905 2648 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
254298c7
YZ
2649 break;
2650 case 4:
ebb6cc03
YZ
2651 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
2652 hwc->config >> 32);
2653 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
2654 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
2655 break;
254298c7 2656 case 5:
ebb6cc03
YZ
2657 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
2658 hwc->config >> 32);
2659 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
2660 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
254298c7
YZ
2661 break;
2662 };
2663
2664 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2665 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
2666}
2667
ebb6cc03
YZ
2668DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
2669DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
254298c7
YZ
2670DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
2671DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
2672DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
2673
2674static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
2675 &format_attr_event5.attr,
2676 &format_attr_xbr_mm_cfg.attr,
2677 &format_attr_xbr_match.attr,
2678 &format_attr_xbr_mask.attr,
2679 &format_attr_qlx_cfg.attr,
2680 &format_attr_iperf_cfg.attr,
fcde10e9
YZ
2681 NULL,
2682};
2683
254298c7 2684static struct attribute_group nhmex_uncore_rbox_format_group = {
fcde10e9 2685 .name = "format",
254298c7 2686 .attrs = nhmex_uncore_rbox_formats_attr,
fcde10e9
YZ
2687};
2688
254298c7
YZ
2689static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2690 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
2691 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
2692 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
2693 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
2694 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
2695 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
fcde10e9
YZ
2696 { /* end: all zeroes */ },
2697};
2698
254298c7
YZ
2699static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2700 NHMEX_UNCORE_OPS_COMMON_INIT(),
2701 .enable_event = nhmex_rbox_msr_enable_event,
2702 .hw_config = nhmex_rbox_hw_config,
2703 .get_constraint = nhmex_rbox_get_constraint,
2704 .put_constraint = nhmex_rbox_put_constraint,
fcde10e9
YZ
2705};
2706
254298c7
YZ
2707static struct intel_uncore_type nhmex_uncore_rbox = {
2708 .name = "rbox",
2709 .num_counters = 8,
2710 .num_boxes = 2,
2711 .perf_ctr_bits = 48,
2712 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
2713 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
2714 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
2715 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
2716 .msr_offset = NHMEX_R_MSR_OFFSET,
2717 .pair_ctr_ctl = 1,
2718 .num_shared_regs = 20,
2719 .event_descs = nhmex_uncore_rbox_events,
2720 .ops = &nhmex_uncore_rbox_ops,
2721 .format_group = &nhmex_uncore_rbox_format_group
fcde10e9
YZ
2722};
2723
254298c7
YZ
2724static struct intel_uncore_type *nhmex_msr_uncores[] = {
2725 &nhmex_uncore_ubox,
2726 &nhmex_uncore_cbox,
2727 &nhmex_uncore_bbox,
2728 &nhmex_uncore_sbox,
2729 &nhmex_uncore_mbox,
2730 &nhmex_uncore_rbox,
2731 &nhmex_uncore_wbox,
fcde10e9
YZ
2732 NULL,
2733};
254298c7 2734/* end of Nehalem-EX uncore support */
fcde10e9 2735
254298c7 2736static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
087bfbb0
YZ
2737{
2738 struct hw_perf_event *hwc = &event->hw;
2739
2740 hwc->idx = idx;
2741 hwc->last_tag = ++box->tags[idx];
2742
2743 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
14371cce
YZ
2744 hwc->event_base = uncore_fixed_ctr(box);
2745 hwc->config_base = uncore_fixed_ctl(box);
087bfbb0
YZ
2746 return;
2747 }
2748
14371cce
YZ
2749 hwc->config_base = uncore_event_ctl(box, hwc->idx);
2750 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
087bfbb0
YZ
2751}
2752
254298c7 2753static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0
YZ
2754{
2755 u64 prev_count, new_count, delta;
2756 int shift;
2757
2758 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2759 shift = 64 - uncore_fixed_ctr_bits(box);
2760 else
2761 shift = 64 - uncore_perf_ctr_bits(box);
2762
2763 /* the hrtimer might modify the previous event value */
2764again:
2765 prev_count = local64_read(&event->hw.prev_count);
2766 new_count = uncore_read_counter(box, event);
2767 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2768 goto again;
2769
2770 delta = (new_count << shift) - (prev_count << shift);
2771 delta >>= shift;
2772
2773 local64_add(delta, &event->count);
2774}
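/*
 * The shift trick above handles wraparound of the N-bit hardware
 * counters: e.g. with 44-bit counters shift = 20, and a wrap from
 * 0xfffffffffff to 0x1 gives ((0x1 << 20) - (0xfffffffffff << 20)) >> 20
 * = 2, the true number of increments.
 */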
2775
2776/*
2777 * The overflow interrupt is unavailable for SandyBridge-EP and broken
2778 * for SandyBridge, so we use an hrtimer to periodically poll the
2779 * counters and avoid missing overflows.
2780 */
2781static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2782{
2783 struct intel_uncore_box *box;
2784 unsigned long flags;
2785 int bit;
2786
2787 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2788 if (!box->n_active || box->cpu != smp_processor_id())
2789 return HRTIMER_NORESTART;
2790 /*
2791 * disable local interrupts to prevent uncore_pmu_event_start/stop
2792 * from interrupting the update process
2793 */
2794 local_irq_save(flags);
2795
2796 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2797 uncore_perf_event_update(box, box->events[bit]);
2798
2799 local_irq_restore(flags);
2800
79859cce 2801 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
087bfbb0
YZ
2802 return HRTIMER_RESTART;
2803}
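/*
 * Rough sizing of the poll interval: the narrowest counters here are
 * 44 bits wide, which at ~3 GHz take on the order of 1-2 hours to wrap,
 * so any hrtimer_duration well below that (the default set in
 * uncore_alloc_box() is UNCORE_PMU_HRTIMER_INTERVAL) keeps
 * uncore_perf_event_update() from ever missing a full wrap.
 */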
2804
2805static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2806{
2807 __hrtimer_start_range_ns(&box->hrtimer,
79859cce 2808 ns_to_ktime(box->hrtimer_duration), 0,
087bfbb0
YZ
2809 HRTIMER_MODE_REL_PINNED, 0);
2810}
2811
2812static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2813{
2814 hrtimer_cancel(&box->hrtimer);
2815}
2816
2817static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2818{
2819 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2820 box->hrtimer.function = uncore_pmu_hrtimer;
2821}
2822
73c4427c 2823static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
087bfbb0
YZ
2824{
2825 struct intel_uncore_box *box;
6a67943a 2826 int i, size;
087bfbb0 2827
254298c7 2828 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
6a67943a 2829
73c4427c 2830 box = kzalloc_node(size, GFP_KERNEL, node);
087bfbb0
YZ
2831 if (!box)
2832 return NULL;
2833
6a67943a
YZ
2834 for (i = 0; i < type->num_shared_regs; i++)
2835 raw_spin_lock_init(&box->shared_regs[i].lock);
2836
087bfbb0
YZ
2837 uncore_pmu_init_hrtimer(box);
2838 atomic_set(&box->refcnt, 1);
2839 box->cpu = -1;
2840 box->phys_id = -1;
2841
79859cce
SE
2842 /* set default hrtimer timeout */
2843 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
2844
087bfbb0
YZ
2845 return box;
2846}
2847
2848static struct intel_uncore_box *
2849uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2850{
402537fd 2851 struct intel_uncore_box *box;
14371cce
YZ
2852
2853 box = *per_cpu_ptr(pmu->box, cpu);
2854 if (box)
2855 return box;
2856
2857 raw_spin_lock(&uncore_box_lock);
2858 list_for_each_entry(box, &pmu->box_list, list) {
2859 if (box->phys_id == topology_physical_package_id(cpu)) {
2860 atomic_inc(&box->refcnt);
2861 *per_cpu_ptr(pmu->box, cpu) = box;
2862 break;
2863 }
2864 }
2865 raw_spin_unlock(&uncore_box_lock);
2866
087bfbb0
YZ
2867 return *per_cpu_ptr(pmu->box, cpu);
2868}
2869
2870static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2871{
2872 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2873}
2874
2875static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2876{
2877 /*
2878 * perf core schedules events on the basis of cpu; uncore events are
2879 * collected by one of the cpus inside a physical package.
2880 */
254298c7 2881 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
087bfbb0
YZ
2882}
2883
254298c7
YZ
2884static int
2885uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
087bfbb0
YZ
2886{
2887 struct perf_event *event;
2888 int n, max_count;
2889
2890 max_count = box->pmu->type->num_counters;
2891 if (box->pmu->type->fixed_ctl)
2892 max_count++;
2893
2894 if (box->n_events >= max_count)
2895 return -EINVAL;
2896
2897 n = box->n_events;
2898 box->event_list[n] = leader;
2899 n++;
2900 if (!dogrp)
2901 return n;
2902
2903 list_for_each_entry(event, &leader->sibling_list, group_entry) {
2904 if (event->state <= PERF_EVENT_STATE_OFF)
2905 continue;
2906
2907 if (n >= max_count)
2908 return -EINVAL;
2909
2910 box->event_list[n] = event;
2911 n++;
2912 }
2913 return n;
2914}
2915
2916static struct event_constraint *
254298c7 2917uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 2918{
6a67943a 2919 struct intel_uncore_type *type = box->pmu->type;
087bfbb0
YZ
2920 struct event_constraint *c;
2921
6a67943a
YZ
2922 if (type->ops->get_constraint) {
2923 c = type->ops->get_constraint(box, event);
2924 if (c)
2925 return c;
2926 }
2927
dbc33f70 2928 if (event->attr.config == UNCORE_FIXED_EVENT)
087bfbb0
YZ
2929 return &constraint_fixed;
2930
2931 if (type->constraints) {
2932 for_each_event_constraint(c, type->constraints) {
2933 if ((event->hw.config & c->cmask) == c->code)
2934 return c;
2935 }
2936 }
2937
2938 return &type->unconstrainted;
2939}
2940
254298c7 2941static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
6a67943a
YZ
2942{
2943 if (box->pmu->type->ops->put_constraint)
2944 box->pmu->type->ops->put_constraint(box, event);
2945}
2946
254298c7 2947static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
087bfbb0
YZ
2948{
2949 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
43b45780 2950 struct event_constraint *c;
6a67943a 2951 int i, wmin, wmax, ret = 0;
087bfbb0
YZ
2952 struct hw_perf_event *hwc;
2953
2954 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2955
2956 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
43b45780 2957 hwc = &box->event_list[i]->hw;
6a67943a 2958 c = uncore_get_event_constraint(box, box->event_list[i]);
43b45780 2959 hwc->constraint = c;
087bfbb0
YZ
2960 wmin = min(wmin, c->weight);
2961 wmax = max(wmax, c->weight);
2962 }
2963
2964 /* fastpath, try to reuse previous register */
2965 for (i = 0; i < n; i++) {
2966 hwc = &box->event_list[i]->hw;
43b45780 2967 c = hwc->constraint;
087bfbb0
YZ
2968
2969 /* never assigned */
2970 if (hwc->idx == -1)
2971 break;
2972
2973 /* constraint still honored */
2974 if (!test_bit(hwc->idx, c->idxmsk))
2975 break;
2976
2977 /* not already used */
2978 if (test_bit(hwc->idx, used_mask))
2979 break;
2980
2981 __set_bit(hwc->idx, used_mask);
6a67943a
YZ
2982 if (assign)
2983 assign[i] = hwc->idx;
087bfbb0 2984 }
087bfbb0 2985 /* slow path */
6a67943a 2986 if (i != n)
43b45780
AH
2987 ret = perf_assign_events(box->event_list, n,
2988 wmin, wmax, assign);
6a67943a
YZ
2989
2990 if (!assign || ret) {
2991 for (i = 0; i < n; i++)
2992 uncore_put_event_constraint(box, box->event_list[i]);
2993 }
087bfbb0
YZ
2994 return ret ? -EINVAL : 0;
2995}
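/*
 * The reuse loop above is the fast path: if every event can keep the
 * counter it already had (constraint still honored, counter not reused),
 * no rescheduling is needed; otherwise perf_assign_events() solves the
 * generic weight-ordered assignment between wmin and wmax.
 */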
2996
2997static void uncore_pmu_event_start(struct perf_event *event, int flags)
2998{
2999 struct intel_uncore_box *box = uncore_event_to_box(event);
3000 int idx = event->hw.idx;
3001
3002 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3003 return;
3004
3005 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3006 return;
3007
3008 event->hw.state = 0;
3009 box->events[idx] = event;
3010 box->n_active++;
3011 __set_bit(idx, box->active_mask);
3012
3013 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3014 uncore_enable_event(box, event);
3015
3016 if (box->n_active == 1) {
3017 uncore_enable_box(box);
3018 uncore_pmu_start_hrtimer(box);
3019 }
3020}
3021
3022static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3023{
3024 struct intel_uncore_box *box = uncore_event_to_box(event);
3025 struct hw_perf_event *hwc = &event->hw;
3026
3027 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3028 uncore_disable_event(box, event);
3029 box->n_active--;
3030 box->events[hwc->idx] = NULL;
3031 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3032 hwc->state |= PERF_HES_STOPPED;
3033
3034 if (box->n_active == 0) {
3035 uncore_disable_box(box);
3036 uncore_pmu_cancel_hrtimer(box);
3037 }
3038 }
3039
3040 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3041 /*
3042 * Drain the remaining delta count out of an event
3043 * that we are disabling:
3044 */
3045 uncore_perf_event_update(box, event);
3046 hwc->state |= PERF_HES_UPTODATE;
3047 }
3048}
3049
3050static int uncore_pmu_event_add(struct perf_event *event, int flags)
3051{
3052 struct intel_uncore_box *box = uncore_event_to_box(event);
3053 struct hw_perf_event *hwc = &event->hw;
3054 int assign[UNCORE_PMC_IDX_MAX];
3055 int i, n, ret;
3056
3057 if (!box)
3058 return -ENODEV;
3059
3060 ret = n = uncore_collect_events(box, event, false);
3061 if (ret < 0)
3062 return ret;
3063
3064 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3065 if (!(flags & PERF_EF_START))
3066 hwc->state |= PERF_HES_ARCH;
3067
3068 ret = uncore_assign_events(box, assign, n);
3069 if (ret)
3070 return ret;
3071
3072 /* save events moving to new counters */
3073 for (i = 0; i < box->n_events; i++) {
3074 event = box->event_list[i];
3075 hwc = &event->hw;
3076
3077 if (hwc->idx == assign[i] &&
3078 hwc->last_tag == box->tags[assign[i]])
3079 continue;
3080 /*
3081 * Ensure we don't accidentally enable a stopped
3082 * counter simply because we rescheduled.
3083 */
3084 if (hwc->state & PERF_HES_STOPPED)
3085 hwc->state |= PERF_HES_ARCH;
3086
3087 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3088 }
3089
3090 /* reprogram moved events into new counters */
3091 for (i = 0; i < n; i++) {
3092 event = box->event_list[i];
3093 hwc = &event->hw;
3094
3095 if (hwc->idx != assign[i] ||
3096 hwc->last_tag != box->tags[assign[i]])
3097 uncore_assign_hw_event(box, event, assign[i]);
3098 else if (i < box->n_events)
3099 continue;
3100
3101 if (hwc->state & PERF_HES_ARCH)
3102 continue;
3103
3104 uncore_pmu_event_start(event, 0);
3105 }
3106 box->n_events = n;
3107
3108 return 0;
3109}
3110
3111static void uncore_pmu_event_del(struct perf_event *event, int flags)
3112{
3113 struct intel_uncore_box *box = uncore_event_to_box(event);
3114 int i;
3115
3116 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3117
3118 for (i = 0; i < box->n_events; i++) {
3119 if (event == box->event_list[i]) {
6a67943a
YZ
3120 uncore_put_event_constraint(box, event);
3121
087bfbb0
YZ
3122 while (++i < box->n_events)
3123 box->event_list[i - 1] = box->event_list[i];
3124
3125 --box->n_events;
3126 break;
3127 }
3128 }
3129
3130 event->hw.idx = -1;
3131 event->hw.last_tag = ~0ULL;
3132}
3133
3134static void uncore_pmu_event_read(struct perf_event *event)
3135{
3136 struct intel_uncore_box *box = uncore_event_to_box(event);
3137 uncore_perf_event_update(box, event);
3138}
3139
3140/*
3141 * validation ensures the group can be loaded onto the
3142 * PMU if it was the only group available.
3143 */
3144static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3145 struct perf_event *event)
3146{
3147 struct perf_event *leader = event->group_leader;
3148 struct intel_uncore_box *fake_box;
087bfbb0
YZ
3149 int ret = -EINVAL, n;
3150
73c4427c 3151 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
087bfbb0
YZ
3152 if (!fake_box)
3153 return -ENOMEM;
3154
3155 fake_box->pmu = pmu;
3156 /*
3157 * the event is not yet connected with its
3158 * siblings; therefore we must first collect
3159 * existing siblings, then add the new event
3160 * before we can simulate the scheduling
3161 */
3162 n = uncore_collect_events(fake_box, leader, true);
3163 if (n < 0)
3164 goto out;
3165
3166 fake_box->n_events = n;
3167 n = uncore_collect_events(fake_box, event, false);
3168 if (n < 0)
3169 goto out;
3170
3171 fake_box->n_events = n;
3172
6a67943a 3173 ret = uncore_assign_events(fake_box, NULL, n);
087bfbb0
YZ
3174out:
3175 kfree(fake_box);
3176 return ret;
3177}
3178
46bdd905 3179static int uncore_pmu_event_init(struct perf_event *event)
087bfbb0
YZ
3180{
3181 struct intel_uncore_pmu *pmu;
3182 struct intel_uncore_box *box;
3183 struct hw_perf_event *hwc = &event->hw;
3184 int ret;
3185
3186 if (event->attr.type != event->pmu->type)
3187 return -ENOENT;
3188
3189 pmu = uncore_event_to_pmu(event);
3190 /* no device found for this pmu */
3191 if (pmu->func_id < 0)
3192 return -ENOENT;
3193
3194 /*
3195 * The uncore PMU always measures at all privilege levels.
3196 * So it doesn't make sense to specify any exclude bits.
3197 */
3198 if (event->attr.exclude_user || event->attr.exclude_kernel ||
3199 event->attr.exclude_hv || event->attr.exclude_idle)
3200 return -EINVAL;
3201
3202 /* Sampling not supported yet */
3203 if (hwc->sample_period)
3204 return -EINVAL;
3205
3206 /*
3207 * Place all uncore events for a particular physical package
3208 * onto a single cpu
3209 */
3210 if (event->cpu < 0)
3211 return -EINVAL;
3212 box = uncore_pmu_to_box(pmu, event->cpu);
3213 if (!box || box->cpu < 0)
3214 return -EINVAL;
3215 event->cpu = box->cpu;
3216
6a67943a
YZ
3217 event->hw.idx = -1;
3218 event->hw.last_tag = ~0ULL;
3219 event->hw.extra_reg.idx = EXTRA_REG_NONE;
ebb6cc03 3220 event->hw.branch_reg.idx = EXTRA_REG_NONE;
6a67943a 3221
087bfbb0
YZ
3222 if (event->attr.config == UNCORE_FIXED_EVENT) {
3223 /* no fixed counter */
3224 if (!pmu->type->fixed_ctl)
3225 return -EINVAL;
3226 /*
3227 * if there is only one fixed counter, only the first pmu
3228 * can access the fixed counter
3229 */
3230 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3231 return -EINVAL;
dbc33f70
SE
3232
3233 /* fixed counters have event field hardcoded to zero */
3234 hwc->config = 0ULL;
087bfbb0
YZ
3235 } else {
3236 hwc->config = event->attr.config & pmu->type->event_mask;
6a67943a
YZ
3237 if (pmu->type->ops->hw_config) {
3238 ret = pmu->type->ops->hw_config(box, event);
3239 if (ret)
3240 return ret;
3241 }
087bfbb0
YZ
3242 }
3243
087bfbb0
YZ
3244 if (event->group_leader != event)
3245 ret = uncore_validate_group(pmu, event);
3246 else
3247 ret = 0;
3248
3249 return ret;
3250}
3251
314d9f63
YZ
3252static ssize_t uncore_get_attr_cpumask(struct device *dev,
3253 struct device_attribute *attr, char *buf)
3254{
3255 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3256
3257 buf[n++] = '\n';
3258 buf[n] = '\0';
3259 return n;
3260}
3261
3262static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3263
3264static struct attribute *uncore_pmu_attrs[] = {
3265 &dev_attr_cpumask.attr,
3266 NULL,
3267};
3268
3269static struct attribute_group uncore_pmu_attr_group = {
3270 .attrs = uncore_pmu_attrs,
3271};
3272
087bfbb0
YZ
3273static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3274{
3275 int ret;
3276
d64b25b6
SE
3277 if (!pmu->type->pmu) {
3278 pmu->pmu = (struct pmu) {
3279 .attr_groups = pmu->type->attr_groups,
3280 .task_ctx_nr = perf_invalid_context,
3281 .event_init = uncore_pmu_event_init,
3282 .add = uncore_pmu_event_add,
3283 .del = uncore_pmu_event_del,
3284 .start = uncore_pmu_event_start,
3285 .stop = uncore_pmu_event_stop,
3286 .read = uncore_pmu_event_read,
3287 };
3288 } else {
3289 pmu->pmu = *pmu->type->pmu;
3290 pmu->pmu.attr_groups = pmu->type->attr_groups;
3291 }
087bfbb0
YZ
3292
3293 if (pmu->type->num_boxes == 1) {
3294 if (strlen(pmu->type->name) > 0)
3295 sprintf(pmu->name, "uncore_%s", pmu->type->name);
3296 else
3297 sprintf(pmu->name, "uncore");
3298 } else {
3299 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3300 pmu->pmu_idx);
3301 }
3302
3303 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3304 return ret;
3305}
3306
3307static void __init uncore_type_exit(struct intel_uncore_type *type)
3308{
3309 int i;
3310
3311 for (i = 0; i < type->num_boxes; i++)
3312 free_percpu(type->pmus[i].box);
3313 kfree(type->pmus);
3314 type->pmus = NULL;
314d9f63
YZ
3315 kfree(type->events_group);
3316 type->events_group = NULL;
087bfbb0
YZ
3317}
3318
cffa59ba 3319static void __init uncore_types_exit(struct intel_uncore_type **types)
14371cce
YZ
3320{
3321 int i;
3322 for (i = 0; types[i]; i++)
3323 uncore_type_exit(types[i]);
3324}
3325
static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			goto fail;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			goto fail;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	type->pmus = pmus;
	return 0;
fail:
	uncore_type_exit(type);
	return -ENOMEM;
}

static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		uncore_type_exit(types[i]);
	return ret;
}

static struct pci_driver *uncore_pci_driver;
static bool pcidrv_registered;

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct intel_uncore_type *type;
	int phys_id;

	phys_id = pcibus_to_physid[pdev->bus->number];
	if (phys_id < 0)
		return -ENODEV;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	/*
	 * for performance monitoring unit with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	return 0;
}

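/*
 * Remove a PCI uncore device: either clear its extra_pci_dev[] slot, or
 * unlink the box from its PMU, drop the per-cpu references and free it.
 */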
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (extra_pci_dev[phys_id][i] == pdev) {
				extra_pci_dev[phys_id][i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	pci_set_drvdata(pdev, NULL);

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);
}

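/*
 * Select the PCI uncore support for this CPU model, build the PCI bus to
 * physical socket mapping and register the uncore PCI driver.
 */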
static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_pci2phy_map_init(0x3ce0);
		if (ret)
			return ret;
		pci_uncores = snbep_pci_uncores;
		uncore_pci_driver = &snbep_uncore_pci_driver;
		break;
	case 62: /* IvyTown */
		ret = snbep_pci2phy_map_init(0x0e1e);
		if (ret)
			return ret;
		pci_uncores = ivt_pci_uncores;
		uncore_pci_driver = &ivt_uncore_pci_driver;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(pci_uncores);
	if (ret)
		return ret;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret == 0)
		pcidrv_registered = true;
	else
		uncore_types_exit(pci_uncores);

	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(pci_uncores);
	}
}

/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);

static void uncore_kfree_boxes(void)
{
	struct intel_uncore_box *box;

	while (!list_empty(&boxes_to_free)) {
		box = list_entry(boxes_to_free.next,
				 struct intel_uncore_box, list);
		list_del(&box->list);
		kfree(box);
	}
}

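/*
 * Drop the dying cpu's reference on each MSR uncore box; a box whose last
 * reference goes away is queued on boxes_to_free for a later kfree().
 */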
static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				list_add(&box->list, &boxes_to_free);
		}
	}
}

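/*
 * Attach the starting cpu to the uncore boxes of its package, reusing a
 * box already owned by another online cpu in the same package if one exists.
 */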
static int uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					if (box) {
						list_add(&box->list,
							 &boxes_to_free);
						box = NULL;
					}
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}

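/* Allocate the per-cpu box structures for a cpu that is about to come up. */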
static int uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}

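/*
 * Hand the event-collecting duty for each box over from old_cpu to
 * new_cpu by migrating the perf context and updating the box's owning cpu.
 */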
static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
							 old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}

static void uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, cpu, target);
	uncore_change_context(pci_uncores, cpu, target);
}

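/*
 * Nominate this cpu to collect uncore events for its package, unless the
 * package already has a collecting cpu in uncore_cpu_mask.
 */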
static void uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, -1, cpu);
	uncore_change_context(pci_uncores, -1, cpu);
}

static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		uncore_kfree_boxes();
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}

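/*
 * Pick the MSR-based uncore support for this CPU model and cap the number
 * of cbox instances at the core count where necessary.
 */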
static int __init uncore_cpu_init(void)
{
	int ret, max_cores;

	max_cores = boot_cpu_data.x86_max_cores;
	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		msr_uncores = nhm_msr_uncores;
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
		if (snb_uncore_cbox.num_boxes > max_cores)
			snb_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snb_msr_uncores;
		break;
	case 45: /* Sandy Bridge-EP */
		if (snbep_uncore_cbox.num_boxes > max_cores)
			snbep_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snbep_msr_uncores;
		break;
	case 46: /* Nehalem-EX */
		uncore_nhmex = true;
		/* fall through */
	case 47: /* Westmere-EX aka. Xeon E7 */
		if (!uncore_nhmex)
			nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
		if (nhmex_uncore_cbox.num_boxes > max_cores)
			nhmex_uncore_cbox.num_boxes = max_cores;
		msr_uncores = nhmex_msr_uncores;
		break;
	case 62: /* IvyTown */
		if (ivt_uncore_cbox.num_boxes > max_cores)
			ivt_uncore_cbox.num_boxes = max_cores;
		msr_uncores = ivt_msr_uncores;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(msr_uncores);
	if (ret)
		return ret;

	return 0;
}

static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	for (i = 0; pci_uncores[i]; i++) {
		type = pci_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}

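/*
 * Populate uncore_cpu_mask with one event-collecting cpu per package and
 * register the cpu hotplug notifier.
 */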
static void uncore_cpumask_init(void)
{
	int cpu;

	/*
	 * only invoke once from msr or pci init code
	 */
	if (!cpumask_empty(&uncore_cpu_mask))
		return;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	register_cpu_notifier(&uncore_cpu_nb);

	put_online_cpus();
}

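/*
 * Entry point (device_initcall): set up PCI and MSR uncore support, pick
 * the collecting cpus and register a perf PMU for each uncore box.
 */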
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}
	uncore_cpumask_init();

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);