/* Nehalem/SandyBridge/Haswell uncore support */
#include "perf_event_intel_uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
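
/*
 * Summary of the perfevtsel layout implied by the masks above: event
 * select in bits 7:0, umask in bits 15:8, edge detect at bit 18, enable
 * at bit 22, invert at bit 23, and the counter mask in bits 28:24 on
 * SNB vs. bits 31:24 on NHM. Illustrative example (hypothetical event
 * and umask values): event 0x22 with umask 0x04 and edge detect would
 * encode as
 *	0x22 | (0x04 << 8) | SNB_UNC_CTL_EDGE_DET
 * which SNB_UNC_RAW_EVENT_MASK passes through unmodified.
 */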

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)
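
/*
 * NHM_UNC_GLOBAL_CTL_EN_PC_ALL sets the low eight bits of the global
 * control MSR, one enable per general-purpose uncore counter, while
 * NHM_UNC_GLOBAL_CTL_EN_FC (bit 32) enables the fixed counter; see
 * nhm_uncore_msr_enable_box() below, which ORs both together.
 */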

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
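
/*
 * These format attributes are exported via sysfs (under
 * /sys/bus/event_source/devices/<pmu>/format/) so tools can assemble
 * raw configs symbolically, e.g. (hypothetical event/umask values):
 *	perf stat -a -e 'uncore_cbox_0/event=0x22,umask=0x4/' -- sleep 1
 */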

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
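
/*
 * Each core has its own C-box slice, so snb_uncore_cpu_init() below
 * clamps the statically declared box count to the number of cores
 * actually present.
 */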
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
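
/*
 * The 6.103515625e-5 scale above is exactly 64 / 2^20: each counter
 * increment presumably corresponds to one 64-byte cache line, so perf
 * multiplies the raw count by this factor to report MiB directly, e.g.:
 *	perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -- sleep 1
 */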

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
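
/*
 * These IMC counters are free running: they cannot be stopped or
 * cleared, only read through the BAR mapped above. fixed_ctr_bits is 32
 * (see snb_uncore_imc below), so the hrtimer armed in
 * snb_uncore_imc_init_box() periodically folds deltas into the 64-bit
 * software count before the hardware value can wrap.
 */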

/*
 * custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with generic uncore
 * logic. Also simplifies processing
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),    /* 2nd Gen Core processor */
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */