/* Nehalem/SandyBridge/Haswell uncore support */
#include "perf_event_intel_uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
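/*
 * Note: the IMC PCI IDs above identify the client host bridge / DRAM
 * controller device (device 0, function 0) of each supported
 * generation; the integrated memory controller counters are reached
 * through that PCI device's MMIO space rather than through MSRs.
 */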
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)
#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
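/*
 * The two raw event masks differ only in the counter-mask width: SNB
 * uses a 5-bit cmask (bits 24-28) while NHM uses a full 8-bit cmask
 * (bits 24-31), which is why the format attributes below come in
 * cmask5 and cmask8 variants.
 */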
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
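/*
 * Fixed counters (hwc->idx >= UNCORE_PMC_IDX_FIXED) only carry an
 * enable bit in their control MSR, so snb_uncore_msr_enable_event()
 * writes SNB_UNC_CTL_EN alone for them; general-purpose counters get
 * their full event-select config ORed with the enable bit.
 */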
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};
static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
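/*
 * The scale factor 6.103515625e-5 is 64 / 2^20: each data_reads /
 * data_writes increment corresponds to one 64-byte cache line, so
 * multiplying the raw count by this value yields MiB, matching the
 * "MiB" unit advertised above.
 */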
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
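/*
 * init_box reads the IMC BAR out of config space at offset 0x48 (low
 * dword first, then the high dword on kernels with 64-bit physical
 * addresses), masks it down to a page boundary and ioremaps enough of
 * it to cover the counter registers. The shortened hrtimer period is
 * what keeps the 32-bit free-running counters from wrapping undetected
 * between updates.
 */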
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
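/*
 * The IMC counters are free running, so there is nothing to program:
 * the enable/disable box and event callbacks above are intentionally
 * empty, and reading a counter is a plain 32-bit load from the mapped
 * MMIO region at the event's fixed offset.
 */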
/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}
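/*
 * With this event_init() in place the IMC counters are used like any
 * other uncore PMU from userspace; a typical (illustrative) invocation
 * would be something like:
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -- sleep 1
 *
 * with perf applying the scale/unit descriptors above to report MiB.
 */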
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
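/*
 * Client parts are single-package, so the bus-to-physical-package map
 * built here simply records physical package id 0 for the bus the IMC
 * device sits on; the uncore core code uses this map to route events
 * to the right package.
 */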
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};

#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}
static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */
/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
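/*
 * Unlike SNB, Nehalem has a box-level enable: the global control MSR
 * gates all eight general-purpose counters (EN_PC_ALL) plus the fixed
 * counter (EN_FC) at once, and the fixed counter's own control MSR
 * uses bit 0 (NHM_UNC_FIXED_CTR_CTL_EN) rather than the event-select
 * enable bit.
 */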
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
/* end of Nehalem uncore support */