/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
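/*
 * The general-purpose counters take the full event select written to
 * their control MSR; the fixed (clockticks) counter control only has
 * an enable bit.
 */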
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

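/*
 * The global enable is shared by all boxes, so only the first PMU
 * instance (pmu_idx 0) programs and clears it.
 */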
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

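/*
 * There is one C-Box PMU per core, so clamp num_boxes to the core
 * count of the part we are actually running on.
 */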
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

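/*
 * Skylake reuses the SNB ARB box, but its uncore global control lives
 * at a different MSR, so the ARB box is switched over to the SKL ops.
 */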
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

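/*
 * The IMC counters count 64-byte cache lines; the scale below is
 * 64 / 2^20 so that the reported unit is MiB.
 */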
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

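/*
 * The IMC counters are not programmable; they live in a small MMIO
 * region whose physical base is read from the BAR at config space
 * offset SNB_UNCORE_PCI_IMC_BAR_OFFSET and then ioremap()ed.
 */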
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

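/*
 * The free-running counters are 32 bits wide and are read directly
 * from the mapped MMIO region at the event's base offset.
 */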
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Custom event_init() function: we define our own fixed, free-running
 * counters, so we do not want to conflict with the generic uncore
 * logic. This also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FIXED;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FIXED + 1;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

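/*
 * The counters free-run and cannot raise interrupts, so a hrtimer
 * periodically folds the 32-bit hardware values into the 64-bit perf
 * count before they can wrap.
 */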
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		box->n_active--;

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		list_del(&event->active_entry);

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (!box)
		return -ENODEV;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	snb_uncore_imc_event_start(event, 0);

	box->n_events++;

	return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			--box->n_events;
			break;
		}
	}
}

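/*
 * Client parts have a single memory controller, so the bus holding the
 * IMC device is simply mapped to physical package 0.
 */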
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= snb_uncore_imc_event_add,
	.del		= snb_uncore_imc_event_del,
	.start		= snb_uncore_imc_event_start,
	.stop		= snb_uncore_imc_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 32,
	.fixed_ctr	= SNB_UNCORE_PCI_IMC_CTR_BASE,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.perf_ctr	= SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
	.event_mask	= SNB_UNCORE_PCI_IMC_EVENT_MASK,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

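/*
 * Walk the known desktop IMC PCI IDs and return the driver for the
 * first device that is actually present in the system.
 */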
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

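/*
 * As on SNB, the fixed counter control carries only an enable bit, so
 * the fixed counter is enabled by writing NHM_UNC_FIXED_CTR_CTL_EN alone.
 */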
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */