/* Nehalem/SandyBridge/Haswell uncore support */
#include "perf_event_intel_uncore.h"

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

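/*
 * The global enable lives in a single MSR shared by all Cbo boxes,
 * so only the first PMU instance programs it.
 */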
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.constraints	= snb_uncore_cbox_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

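/*
 * The IMC free-running counters count 64-byte lines; the scale of
 * 64/2^20 = 6.103515625e-5 converts the raw count to MiB.
 */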
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

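/*
 * Read the IMC BAR from PCI config space and map the page-aligned
 * MMIO region that holds the free-running counters.
 */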
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

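/* The free-running IMC counters are plain 32-bit MMIO reads. */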
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Custom event_init() function because we define our own fixed,
 * free-running counters, so we do not want to conflict with generic
 * uncore logic. This also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

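/*
 * The 32-bit free-running counters wrap quickly under load, so an
 * hrtimer periodically folds deltas into the 64-bit perf count while
 * at least one event is active.
 */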
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

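/*
 * add/del only maintain software state; the free-running counters
 * need no hardware programming.
 */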
static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}

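/*
 * Client parts have a single package, so the IMC's PCI bus simply
 * maps to physical package id 0.
 */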
static int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	int bus;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;

	uncore_pcibus_to_physid[bus] = 0;

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

int snb_uncore_pci_init(void)
{
	int ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
	if (ret)
		return ret;
	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = &snb_uncore_pci_driver;
	return 0;
}

int ivb_uncore_pci_init(void)
{
	int ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
	if (ret)
		return ret;
	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = &ivb_uncore_pci_driver;
	return 0;
}

int hsw_uncore_pci_init(void)
{
	int ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
	if (ret)
		return ret;
	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = &hsw_uncore_pci_driver;
	return 0;
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

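/*
 * The fixed counter has its own enable bit; general-purpose counters
 * use the enable bit in their event select.
 */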
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */