// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

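/*
 * SNBEP_CBO_EVENT_EXTRA_REG() ties an event encoding to the shared Cbox
 * filter MSR.  For example, the table entry
 *	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4)
 * further below says: when (event->hw.config & 0xffff) == 0x0334, the
 * event needs the filter field selected by bit 0x4 (the "state" field;
 * see snbep_cbox_filter_mask()).
 */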
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

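/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x.  It is
 * used below to unpack the per-field reference counts that the constraint
 * code keeps packed in a single atomic_t, e.g.
 * __BITS_VALUE(atomic_read(&er->ref), 2, 6) yields bits 12-17, the use
 * count of the third filter field.
 */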
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
						KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
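
/*
 * The format attributes above become the sysfs "format" files that perf
 * parses to place user-supplied fields into config/config1/config2.  As a
 * rough illustration (the event and opcode values are made up for the
 * example), something like
 *	perf stat -e 'uncore_cbox_0/event=0x34,umask=0x3,filter_opc=0x182/'
 * would land the opcode in config1 bits 23-31 on SNB-EP, which the cbox
 * hw_config() callback then copies into the filter register.
 */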

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

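/*
 * PCI config space can only be read 32 bits at a time, so the counter
 * below is assembled from two dword reads into the low and high halves
 * of 'count'.  The two reads are not atomic versus a carry between the
 * halves; the generic uncore code presumably tolerates this by re-reading
 * counters from its polling timer well within the wrap interval.
 */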
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

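/*
 * Boxes are stopped and restarted by toggling the FRZ bit in the box
 * control register rather than by touching every counter: disable_box()
 * freezes all counters in the box at once and enable_box() unfreezes
 * them.  The MSR variants below mirror the read-modify-write sequence of
 * the PCI variants above.
 */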
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

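/*
 * The Cbox filter register is shared by all counters in a box, so each of
 * its (up to) five fields gets a 6-bit reference count packed into
 * er->ref.  A new event may claim a field either when the field is unused
 * (count is zero) or when it programs exactly the same value as the
 * current owner; on a partial failure every field claimed so far is
 * released again and the event gets an empty constraint.
 */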
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

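/*
 * Translate the abstract field bits used in the extra_reg table into the
 * real bit ranges of the SNB-EP Cbox filter register.  For instance,
 * fields == 0xc selects the state and opcode parts:
 *	snbep_cbox_filter_mask(0xc) == 0x7c0000 | 0xff800000
 */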
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

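/*
 * The PCU band filters are four interchangeable 8-bit fields of config1,
 * one per band event 0xb-0xe.  If the byte an event asked for is already
 * taken with a different value, the event can be migrated to a free byte:
 * snbep_pcu_alter_er() shifts the filter value into the new byte lane
 * and, since the band events are indexed the same way, bumps the event
 * select by the same distance when 'modify' is set.
 */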
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

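/*
 * QPI event 0x38 (packet matching) is not programmed through the box's
 * own registers: the match/mask dwords live in a separate "filter" PCI
 * device, looked up in uncore_extra_pci_dev[] with the port index derived
 * from SNBEP_PCI_QPI_PORT0_FILTER above.  hw_config() below only stashes
 * config1/config2; snbep_qpi_enable_event() writes them out.
 */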
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

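/*
 * 0x3ce0 is the PCI device id of the SNB-EP ubox, which carries the
 * CPUNODEID and GIDNIDMAP registers used above to translate a PCI bus
 * number into a physical socket id.  Passing reverse == true makes buses
 * without a ubox inherit the mapping of the nearest ubox bus above them
 * in the fixup loop of snbep_pci2phy_map_init().
 */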
1370 int snbep_uncore_pci_init(void)
1371 {
1372 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1373 if (ret)
1374 return ret;
1375 uncore_pci_uncores = snbep_pci_uncores;
1376 uncore_pci_driver = &snbep_uncore_pci_driver;
1377 return 0;
1378 }
1379 /* end of Sandy Bridge-EP uncore support */
1380
1381 /* IvyTown uncore support */
1382 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1383 {
1384 unsigned msr = uncore_msr_box_ctl(box);
1385 if (msr)
1386 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1387 }
1388
1389 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1390 {
1391 struct pci_dev *pdev = box->pci_dev;
1392
1393 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1394 }
1395
1396 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1397 .init_box = ivbep_uncore_msr_init_box, \
1398 .disable_box = snbep_uncore_msr_disable_box, \
1399 .enable_box = snbep_uncore_msr_enable_box, \
1400 .disable_event = snbep_uncore_msr_disable_event, \
1401 .enable_event = snbep_uncore_msr_enable_event, \
1402 .read_counter = uncore_msr_read_counter
1403
1404 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1405 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1406 };
1407
1408 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1409 .init_box = ivbep_uncore_pci_init_box,
1410 .disable_box = snbep_uncore_pci_disable_box,
1411 .enable_box = snbep_uncore_pci_enable_box,
1412 .disable_event = snbep_uncore_pci_disable_event,
1413 .enable_event = snbep_uncore_pci_enable_event,
1414 .read_counter = snbep_uncore_pci_read_counter,
1415 };
1416
1417 #define IVBEP_UNCORE_PCI_COMMON_INIT() \
1418 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1419 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1420 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
1421 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1422 .ops = &ivbep_uncore_pci_ops, \
1423 .format_group = &ivbep_uncore_format_group
1424
1425 static struct attribute *ivbep_uncore_formats_attr[] = {
1426 &format_attr_event.attr,
1427 &format_attr_umask.attr,
1428 &format_attr_edge.attr,
1429 &format_attr_inv.attr,
1430 &format_attr_thresh8.attr,
1431 NULL,
1432 };
1433
1434 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1435 &format_attr_event.attr,
1436 &format_attr_umask.attr,
1437 &format_attr_edge.attr,
1438 &format_attr_inv.attr,
1439 &format_attr_thresh5.attr,
1440 NULL,
1441 };
1442
1443 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1444 &format_attr_event.attr,
1445 &format_attr_umask.attr,
1446 &format_attr_edge.attr,
1447 &format_attr_tid_en.attr,
1448 &format_attr_thresh8.attr,
1449 &format_attr_filter_tid.attr,
1450 &format_attr_filter_link.attr,
1451 &format_attr_filter_state2.attr,
1452 &format_attr_filter_nid2.attr,
1453 &format_attr_filter_opc2.attr,
1454 &format_attr_filter_nc.attr,
1455 &format_attr_filter_c6.attr,
1456 &format_attr_filter_isoc.attr,
1457 NULL,
1458 };
1459
1460 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1461 &format_attr_event.attr,
1462 &format_attr_occ_sel.attr,
1463 &format_attr_edge.attr,
1464 &format_attr_thresh5.attr,
1465 &format_attr_occ_invert.attr,
1466 &format_attr_occ_edge.attr,
1467 &format_attr_filter_band0.attr,
1468 &format_attr_filter_band1.attr,
1469 &format_attr_filter_band2.attr,
1470 &format_attr_filter_band3.attr,
1471 NULL,
1472 };
1473
1474 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1475 &format_attr_event_ext.attr,
1476 &format_attr_umask.attr,
1477 &format_attr_edge.attr,
1478 &format_attr_thresh8.attr,
1479 &format_attr_match_rds.attr,
1480 &format_attr_match_rnid30.attr,
1481 &format_attr_match_rnid4.attr,
1482 &format_attr_match_dnid.attr,
1483 &format_attr_match_mc.attr,
1484 &format_attr_match_opc.attr,
1485 &format_attr_match_vnw.attr,
1486 &format_attr_match0.attr,
1487 &format_attr_match1.attr,
1488 &format_attr_mask_rds.attr,
1489 &format_attr_mask_rnid30.attr,
1490 &format_attr_mask_rnid4.attr,
1491 &format_attr_mask_dnid.attr,
1492 &format_attr_mask_mc.attr,
1493 &format_attr_mask_opc.attr,
1494 &format_attr_mask_vnw.attr,
1495 &format_attr_mask0.attr,
1496 &format_attr_mask1.attr,
1497 NULL,
1498 };
1499
1500 static const struct attribute_group ivbep_uncore_format_group = {
1501 .name = "format",
1502 .attrs = ivbep_uncore_formats_attr,
1503 };
1504
1505 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1506 .name = "format",
1507 .attrs = ivbep_uncore_ubox_formats_attr,
1508 };
1509
1510 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1511 .name = "format",
1512 .attrs = ivbep_uncore_cbox_formats_attr,
1513 };
1514
1515 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1516 .name = "format",
1517 .attrs = ivbep_uncore_pcu_formats_attr,
1518 };
1519
1520 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1521 .name = "format",
1522 .attrs = ivbep_uncore_qpi_formats_attr,
1523 };
1524
1525 static struct intel_uncore_type ivbep_uncore_ubox = {
1526 .name = "ubox",
1527 .num_counters = 2,
1528 .num_boxes = 1,
1529 .perf_ctr_bits = 44,
1530 .fixed_ctr_bits = 48,
1531 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1532 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1533 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1534 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1535 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1536 .ops = &ivbep_uncore_msr_ops,
1537 .format_group = &ivbep_uncore_ubox_format_group,
1538 };
1539
1540 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1541 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1542 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1543 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1544 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1545 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1546 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1547 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1548 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1549 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1550 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1551 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1552 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1553 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1554 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1555 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1556 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1557 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1558 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1559 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1560 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1561 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1562 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1563 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1564 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1565 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1566 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1567 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1568 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1569 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1570 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1571 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1572 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1573 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1574 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1575 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1576 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1577 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1578 EVENT_EXTRA_END
1579 };
1580
1581 static u64 ivbep_cbox_filter_mask(int fields)
1582 {
1583 u64 mask = 0;
1584
1585 if (fields & 0x1)
1586 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1587 if (fields & 0x2)
1588 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1589 if (fields & 0x4)
1590 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1591 if (fields & 0x8)
1592 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1593 if (fields & 0x10) {
1594 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1595 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1596 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1597 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1598 }
1599
1600 return mask;
1601 }
1602
1603 static struct event_constraint *
1604 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1605 {
1606 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1607 }
1608
1609 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1610 {
1611 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1612 struct extra_reg *er;
1613 int idx = 0;
1614
1615 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1616 if (er->event != (event->hw.config & er->config_mask))
1617 continue;
1618 idx |= er->idx;
1619 }
1620
1621 if (idx) {
1622 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1623 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1624 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1625 reg1->idx = idx;
1626 }
1627 return 0;
1628 }
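/*
 * Illustrative example (names and perf syntax are ours, not from this
 * file): a raw Cbox event 0x4134 (event=0x34, umask=0x41) matches the
 * 0x4134 row in ivbep_uncore_cbox_extra_regs above, so idx becomes 0xc
 * (STATE | NID) and only the state and node-id fields of the user's
 * config1 survive ivbep_cbox_filter_mask(0xc), e.g.:
 *
 *	perf stat -e 'uncore_cbox_0/event=0x34,umask=0x41,config1=0x3f/'
 */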
1629
1630 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1631 {
1632 struct hw_perf_event *hwc = &event->hw;
1633 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1634
1635 if (reg1->idx != EXTRA_REG_NONE) {
1636 u64 filter = uncore_shared_reg_config(box, 0);
1637 wrmsrl(reg1->reg, filter & 0xffffffff);
1638 wrmsrl(reg1->reg + 6, filter >> 32);
1639 }
1640
1641 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1642 }
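/*
 * The filter value is split across two MSR writes because the IvyTown
 * Cbox keeps its second filter register 6 MSRs above the first (hence
 * reg1->reg + 6 for the upper 32 bits); compare hswep_cbox_enable_event()
 * below, where the Haswell-EP FILTER0/FILTER1 pair is adjacent and the
 * high half goes to reg1->reg + 1.
 */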
1643
1644 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1645 .init_box = ivbep_uncore_msr_init_box,
1646 .disable_box = snbep_uncore_msr_disable_box,
1647 .enable_box = snbep_uncore_msr_enable_box,
1648 .disable_event = snbep_uncore_msr_disable_event,
1649 .enable_event = ivbep_cbox_enable_event,
1650 .read_counter = uncore_msr_read_counter,
1651 .hw_config = ivbep_cbox_hw_config,
1652 .get_constraint = ivbep_cbox_get_constraint,
1653 .put_constraint = snbep_cbox_put_constraint,
1654 };
1655
1656 static struct intel_uncore_type ivbep_uncore_cbox = {
1657 .name = "cbox",
1658 .num_counters = 4,
1659 .num_boxes = 15,
1660 .perf_ctr_bits = 44,
1661 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1662 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1663 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1664 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1665 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1666 .num_shared_regs = 1,
1667 .constraints = snbep_uncore_cbox_constraints,
1668 .ops = &ivbep_uncore_cbox_ops,
1669 .format_group = &ivbep_uncore_cbox_format_group,
1670 };
1671
1672 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1673 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1674 .hw_config = snbep_pcu_hw_config,
1675 .get_constraint = snbep_pcu_get_constraint,
1676 .put_constraint = snbep_pcu_put_constraint,
1677 };
1678
1679 static struct intel_uncore_type ivbep_uncore_pcu = {
1680 .name = "pcu",
1681 .num_counters = 4,
1682 .num_boxes = 1,
1683 .perf_ctr_bits = 48,
1684 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1685 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1686 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1687 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1688 .num_shared_regs = 1,
1689 .ops = &ivbep_uncore_pcu_ops,
1690 .format_group = &ivbep_uncore_pcu_format_group,
1691 };
1692
1693 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1694 &ivbep_uncore_ubox,
1695 &ivbep_uncore_cbox,
1696 &ivbep_uncore_pcu,
1697 NULL,
1698 };
1699
1700 void ivbep_uncore_cpu_init(void)
1701 {
1702 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1703 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1704 uncore_msr_uncores = ivbep_msr_uncores;
1705 }
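/*
 * There is one Cbox per physical core, so num_boxes starts at the
 * family maximum (15) and is clamped to the core count of the part we
 * actually booted on; the same pattern recurs for HSX and BDX below.
 */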
1706
1707 static struct intel_uncore_type ivbep_uncore_ha = {
1708 .name = "ha",
1709 .num_counters = 4,
1710 .num_boxes = 2,
1711 .perf_ctr_bits = 48,
1712 IVBEP_UNCORE_PCI_COMMON_INIT(),
1713 };
1714
1715 static struct intel_uncore_type ivbep_uncore_imc = {
1716 .name = "imc",
1717 .num_counters = 4,
1718 .num_boxes = 8,
1719 .perf_ctr_bits = 48,
1720 .fixed_ctr_bits = 48,
1721 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1722 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1723 .event_descs = snbep_uncore_imc_events,
1724 IVBEP_UNCORE_PCI_COMMON_INIT(),
1725 };
1726
1727 /* registers in IRP boxes are not properly aligned */
1728 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1729 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1730
1731 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1732 {
1733 struct pci_dev *pdev = box->pci_dev;
1734 struct hw_perf_event *hwc = &event->hw;
1735
1736 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1737 hwc->config | SNBEP_PMON_CTL_EN);
1738 }
1739
1740 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1741 {
1742 struct pci_dev *pdev = box->pci_dev;
1743 struct hw_perf_event *hwc = &event->hw;
1744
1745 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1746 }
1747
1748 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1749 {
1750 struct pci_dev *pdev = box->pci_dev;
1751 struct hw_perf_event *hwc = &event->hw;
1752 u64 count = 0;
1753
1754 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1755 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1756
1757 return count;
1758 }
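/*
 * The two dword reads above fill the low and high halves of 'count' in
 * place.  A more explicit but equivalent sketch (our phrasing, same
 * semantics):
 *
 *	u32 lo, hi;
 *
 *	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], &lo);
 *	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, &hi);
 *	return ((u64)hi << 32) | lo;
 *
 * Note the pair of reads is not atomic; the counter may tick between
 * them.
 */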
1759
1760 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1761 .init_box = ivbep_uncore_pci_init_box,
1762 .disable_box = snbep_uncore_pci_disable_box,
1763 .enable_box = snbep_uncore_pci_enable_box,
1764 .disable_event = ivbep_uncore_irp_disable_event,
1765 .enable_event = ivbep_uncore_irp_enable_event,
1766 .read_counter = ivbep_uncore_irp_read_counter,
1767 };
1768
1769 static struct intel_uncore_type ivbep_uncore_irp = {
1770 .name = "irp",
1771 .num_counters = 4,
1772 .num_boxes = 1,
1773 .perf_ctr_bits = 48,
1774 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1775 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1776 .ops = &ivbep_uncore_irp_ops,
1777 .format_group = &ivbep_uncore_format_group,
1778 };
1779
1780 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1781 .init_box = ivbep_uncore_pci_init_box,
1782 .disable_box = snbep_uncore_pci_disable_box,
1783 .enable_box = snbep_uncore_pci_enable_box,
1784 .disable_event = snbep_uncore_pci_disable_event,
1785 .enable_event = snbep_qpi_enable_event,
1786 .read_counter = snbep_uncore_pci_read_counter,
1787 .hw_config = snbep_qpi_hw_config,
1788 .get_constraint = uncore_get_constraint,
1789 .put_constraint = uncore_put_constraint,
1790 };
1791
1792 static struct intel_uncore_type ivbep_uncore_qpi = {
1793 .name = "qpi",
1794 .num_counters = 4,
1795 .num_boxes = 3,
1796 .perf_ctr_bits = 48,
1797 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1798 .event_ctl = SNBEP_PCI_PMON_CTL0,
1799 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1800 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1801 .num_shared_regs = 1,
1802 .ops = &ivbep_uncore_qpi_ops,
1803 .format_group = &ivbep_uncore_qpi_format_group,
1804 };
1805
1806 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1807 .name = "r2pcie",
1808 .num_counters = 4,
1809 .num_boxes = 1,
1810 .perf_ctr_bits = 44,
1811 .constraints = snbep_uncore_r2pcie_constraints,
1812 IVBEP_UNCORE_PCI_COMMON_INIT(),
1813 };
1814
1815 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1816 .name = "r3qpi",
1817 .num_counters = 3,
1818         .num_boxes              = 3,
1819 .perf_ctr_bits = 44,
1820 .constraints = snbep_uncore_r3qpi_constraints,
1821 IVBEP_UNCORE_PCI_COMMON_INIT(),
1822 };
1823
1824 enum {
1825 IVBEP_PCI_UNCORE_HA,
1826 IVBEP_PCI_UNCORE_IMC,
1827 IVBEP_PCI_UNCORE_IRP,
1828 IVBEP_PCI_UNCORE_QPI,
1829 IVBEP_PCI_UNCORE_R2PCIE,
1830 IVBEP_PCI_UNCORE_R3QPI,
1831 };
1832
1833 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1834 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1835 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1836 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1837 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1838 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1839 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1840 NULL,
1841 };
1842
1843 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1844 { /* Home Agent 0 */
1845 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1846 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1847 },
1848 { /* Home Agent 1 */
1849 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1850 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1851 },
1852 { /* MC0 Channel 0 */
1853 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1854 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1855 },
1856 { /* MC0 Channel 1 */
1857 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1858 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1859 },
1860 { /* MC0 Channel 3 */
1861 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1862 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1863 },
1864 { /* MC0 Channel 4 */
1865 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1866 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1867 },
1868 { /* MC1 Channel 0 */
1869 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1870 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1871 },
1872 { /* MC1 Channel 1 */
1873 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1874 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1875 },
1876 { /* MC1 Channel 3 */
1877 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1878 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1879 },
1880 { /* MC1 Channel 4 */
1881 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1882 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1883 },
1884 { /* IRP */
1885 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1886 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1887 },
1888 { /* QPI0 Port 0 */
1889 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1890 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1891 },
1892 { /* QPI0 Port 1 */
1893 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1894 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
1895 },
1896 { /* QPI1 Port 2 */
1897 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1898 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
1899 },
1900 { /* R2PCIe */
1901 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1902 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
1903 },
1904 { /* R3QPI0 Link 0 */
1905 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1906 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
1907 },
1908 { /* R3QPI0 Link 1 */
1909 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1910 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
1911 },
1912 { /* R3QPI1 Link 2 */
1913 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1914 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
1915 },
1916 { /* QPI Port 0 filter */
1917 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1918 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1919 SNBEP_PCI_QPI_PORT0_FILTER),
1920 },
1921         { /* QPI Port 1 filter */
1922 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1923 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1924 SNBEP_PCI_QPI_PORT1_FILTER),
1925 },
1926 { /* end: all zeroes */ }
1927 };
1928
1929 static struct pci_driver ivbep_uncore_pci_driver = {
1930 .name = "ivbep_uncore",
1931 .id_table = ivbep_uncore_pci_ids,
1932 };
1933
1934 int ivbep_uncore_pci_init(void)
1935 {
1936 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1937 if (ret)
1938 return ret;
1939 uncore_pci_uncores = ivbep_pci_uncores;
1940 uncore_pci_driver = &ivbep_uncore_pci_driver;
1941 return 0;
1942 }
1943 /* end of IvyTown uncore support */
1944
1945 /* KNL uncore support */
1946 static struct attribute *knl_uncore_ubox_formats_attr[] = {
1947 &format_attr_event.attr,
1948 &format_attr_umask.attr,
1949 &format_attr_edge.attr,
1950 &format_attr_tid_en.attr,
1951 &format_attr_inv.attr,
1952 &format_attr_thresh5.attr,
1953 NULL,
1954 };
1955
1956 static const struct attribute_group knl_uncore_ubox_format_group = {
1957 .name = "format",
1958 .attrs = knl_uncore_ubox_formats_attr,
1959 };
1960
1961 static struct intel_uncore_type knl_uncore_ubox = {
1962 .name = "ubox",
1963 .num_counters = 2,
1964 .num_boxes = 1,
1965 .perf_ctr_bits = 48,
1966 .fixed_ctr_bits = 48,
1967 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
1968 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
1969 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
1970 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
1971 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
1972 .ops = &snbep_uncore_msr_ops,
1973 .format_group = &knl_uncore_ubox_format_group,
1974 };
1975
1976 static struct attribute *knl_uncore_cha_formats_attr[] = {
1977 &format_attr_event.attr,
1978 &format_attr_umask.attr,
1979 &format_attr_qor.attr,
1980 &format_attr_edge.attr,
1981 &format_attr_tid_en.attr,
1982 &format_attr_inv.attr,
1983 &format_attr_thresh8.attr,
1984 &format_attr_filter_tid4.attr,
1985 &format_attr_filter_link3.attr,
1986 &format_attr_filter_state4.attr,
1987 &format_attr_filter_local.attr,
1988 &format_attr_filter_all_op.attr,
1989 &format_attr_filter_nnm.attr,
1990 &format_attr_filter_opc3.attr,
1991 &format_attr_filter_nc.attr,
1992 &format_attr_filter_isoc.attr,
1993 NULL,
1994 };
1995
1996 static const struct attribute_group knl_uncore_cha_format_group = {
1997 .name = "format",
1998 .attrs = knl_uncore_cha_formats_attr,
1999 };
2000
2001 static struct event_constraint knl_uncore_cha_constraints[] = {
2002 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2003 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2004 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2005 EVENT_CONSTRAINT_END
2006 };
2007
2008 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2009 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2010 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2011 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2012 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2013 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2014 EVENT_EXTRA_END
2015 };
2016
2017 static u64 knl_cha_filter_mask(int fields)
2018 {
2019 u64 mask = 0;
2020
2021 if (fields & 0x1)
2022 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2023 if (fields & 0x2)
2024 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2025 if (fields & 0x4)
2026 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2027 return mask;
2028 }
2029
2030 static struct event_constraint *
2031 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2032 {
2033 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2034 }
2035
2036 static int knl_cha_hw_config(struct intel_uncore_box *box,
2037 struct perf_event *event)
2038 {
2039 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2040 struct extra_reg *er;
2041 int idx = 0;
2042
2043 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2044 if (er->event != (event->hw.config & er->config_mask))
2045 continue;
2046 idx |= er->idx;
2047 }
2048
2049 if (idx) {
2050 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2051 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2052 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2053
2054 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2055 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2056 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2057 reg1->idx = idx;
2058 }
2059 return 0;
2060 }
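/*
 * Unlike the IVB/HSX flavours of this function, KNL unconditionally ORs
 * the remote-node, local-node and NNC filter bits into reg1->config
 * whenever any filter field is in use, so a filtered event still counts
 * traffic from all node classes by default.
 */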
2061
2062 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2063 struct perf_event *event);
2064
2065 static struct intel_uncore_ops knl_uncore_cha_ops = {
2066 .init_box = snbep_uncore_msr_init_box,
2067 .disable_box = snbep_uncore_msr_disable_box,
2068 .enable_box = snbep_uncore_msr_enable_box,
2069 .disable_event = snbep_uncore_msr_disable_event,
2070 .enable_event = hswep_cbox_enable_event,
2071 .read_counter = uncore_msr_read_counter,
2072 .hw_config = knl_cha_hw_config,
2073 .get_constraint = knl_cha_get_constraint,
2074 .put_constraint = snbep_cbox_put_constraint,
2075 };
2076
2077 static struct intel_uncore_type knl_uncore_cha = {
2078 .name = "cha",
2079 .num_counters = 4,
2080 .num_boxes = 38,
2081 .perf_ctr_bits = 48,
2082 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2083 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2084 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2085 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2086 .msr_offset = KNL_CHA_MSR_OFFSET,
2087 .num_shared_regs = 1,
2088 .constraints = knl_uncore_cha_constraints,
2089 .ops = &knl_uncore_cha_ops,
2090 .format_group = &knl_uncore_cha_format_group,
2091 };
2092
2093 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2094 &format_attr_event2.attr,
2095 &format_attr_use_occ_ctr.attr,
2096 &format_attr_occ_sel.attr,
2097 &format_attr_edge.attr,
2098 &format_attr_tid_en.attr,
2099 &format_attr_inv.attr,
2100 &format_attr_thresh6.attr,
2101 &format_attr_occ_invert.attr,
2102 &format_attr_occ_edge_det.attr,
2103 NULL,
2104 };
2105
2106 static const struct attribute_group knl_uncore_pcu_format_group = {
2107 .name = "format",
2108 .attrs = knl_uncore_pcu_formats_attr,
2109 };
2110
2111 static struct intel_uncore_type knl_uncore_pcu = {
2112 .name = "pcu",
2113 .num_counters = 4,
2114 .num_boxes = 1,
2115 .perf_ctr_bits = 48,
2116 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2117 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2118 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2119 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2120 .ops = &snbep_uncore_msr_ops,
2121 .format_group = &knl_uncore_pcu_format_group,
2122 };
2123
2124 static struct intel_uncore_type *knl_msr_uncores[] = {
2125 &knl_uncore_ubox,
2126 &knl_uncore_cha,
2127 &knl_uncore_pcu,
2128 NULL,
2129 };
2130
2131 void knl_uncore_cpu_init(void)
2132 {
2133 uncore_msr_uncores = knl_msr_uncores;
2134 }
2135
2136 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2137 {
2138 struct pci_dev *pdev = box->pci_dev;
2139 int box_ctl = uncore_pci_box_ctl(box);
2140
2141 pci_write_config_dword(pdev, box_ctl, 0);
2142 }
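/*
 * "Enabling" the box means clearing the whole box control register:
 * this drops the SNBEP_PMON_BOX_CTL_FRZ freeze bit that
 * snbep_uncore_pci_disable_box() sets, and on KNL a plain write of 0 is
 * evidently sufficient, so no read-modify-write is attempted.
 */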
2143
2144 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2145 struct perf_event *event)
2146 {
2147 struct pci_dev *pdev = box->pci_dev;
2148 struct hw_perf_event *hwc = &event->hw;
2149
2150 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2151 == UNCORE_FIXED_EVENT)
2152 pci_write_config_dword(pdev, hwc->config_base,
2153 hwc->config | KNL_PMON_FIXED_CTL_EN);
2154 else
2155 pci_write_config_dword(pdev, hwc->config_base,
2156 hwc->config | SNBEP_PMON_CTL_EN);
2157 }
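/*
 * UNCORE_FIXED_EVENT (0xff, from uncore.h) in the event-select field
 * steers the enable to the fixed counter's control bit
 * (KNL_PMON_FIXED_CTL_EN) instead of the generic SNBEP_PMON_CTL_EN.
 * Illustrative usage (perf syntax is ours, not defined in this file):
 *
 *	perf stat -e 'uncore_imc_0/event=0xff/'	(fixed counter)
 *	perf stat -e 'uncore_imc_0/event=0x1/'	(generic counter)
 */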
2158
2159 static struct intel_uncore_ops knl_uncore_imc_ops = {
2160 .init_box = snbep_uncore_pci_init_box,
2161 .disable_box = snbep_uncore_pci_disable_box,
2162 .enable_box = knl_uncore_imc_enable_box,
2163 .read_counter = snbep_uncore_pci_read_counter,
2164 .enable_event = knl_uncore_imc_enable_event,
2165 .disable_event = snbep_uncore_pci_disable_event,
2166 };
2167
2168 static struct intel_uncore_type knl_uncore_imc_uclk = {
2169 .name = "imc_uclk",
2170 .num_counters = 4,
2171 .num_boxes = 2,
2172 .perf_ctr_bits = 48,
2173 .fixed_ctr_bits = 48,
2174 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2175 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2176 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2177 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2178 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2179 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2180 .ops = &knl_uncore_imc_ops,
2181 .format_group = &snbep_uncore_format_group,
2182 };
2183
2184 static struct intel_uncore_type knl_uncore_imc_dclk = {
2185 .name = "imc",
2186 .num_counters = 4,
2187 .num_boxes = 6,
2188 .perf_ctr_bits = 48,
2189 .fixed_ctr_bits = 48,
2190 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2191 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2192 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2193 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2194 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2195 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2196 .ops = &knl_uncore_imc_ops,
2197 .format_group = &snbep_uncore_format_group,
2198 };
2199
2200 static struct intel_uncore_type knl_uncore_edc_uclk = {
2201 .name = "edc_uclk",
2202 .num_counters = 4,
2203 .num_boxes = 8,
2204 .perf_ctr_bits = 48,
2205 .fixed_ctr_bits = 48,
2206 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2207 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2208 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2209 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2210 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2211 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2212 .ops = &knl_uncore_imc_ops,
2213 .format_group = &snbep_uncore_format_group,
2214 };
2215
2216 static struct intel_uncore_type knl_uncore_edc_eclk = {
2217 .name = "edc_eclk",
2218 .num_counters = 4,
2219 .num_boxes = 8,
2220 .perf_ctr_bits = 48,
2221 .fixed_ctr_bits = 48,
2222 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2223 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2224 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2225 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2226 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2227 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2228 .ops = &knl_uncore_imc_ops,
2229 .format_group = &snbep_uncore_format_group,
2230 };
2231
2232 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2233 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2234 EVENT_CONSTRAINT_END
2235 };
2236
2237 static struct intel_uncore_type knl_uncore_m2pcie = {
2238 .name = "m2pcie",
2239 .num_counters = 4,
2240 .num_boxes = 1,
2241 .perf_ctr_bits = 48,
2242 .constraints = knl_uncore_m2pcie_constraints,
2243 SNBEP_UNCORE_PCI_COMMON_INIT(),
2244 };
2245
2246 static struct attribute *knl_uncore_irp_formats_attr[] = {
2247 &format_attr_event.attr,
2248 &format_attr_umask.attr,
2249 &format_attr_qor.attr,
2250 &format_attr_edge.attr,
2251 &format_attr_inv.attr,
2252 &format_attr_thresh8.attr,
2253 NULL,
2254 };
2255
2256 static const struct attribute_group knl_uncore_irp_format_group = {
2257 .name = "format",
2258 .attrs = knl_uncore_irp_formats_attr,
2259 };
2260
2261 static struct intel_uncore_type knl_uncore_irp = {
2262 .name = "irp",
2263 .num_counters = 2,
2264 .num_boxes = 1,
2265 .perf_ctr_bits = 48,
2266 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2267 .event_ctl = SNBEP_PCI_PMON_CTL0,
2268 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2269 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2270 .ops = &snbep_uncore_pci_ops,
2271 .format_group = &knl_uncore_irp_format_group,
2272 };
2273
2274 enum {
2275 KNL_PCI_UNCORE_MC_UCLK,
2276 KNL_PCI_UNCORE_MC_DCLK,
2277 KNL_PCI_UNCORE_EDC_UCLK,
2278 KNL_PCI_UNCORE_EDC_ECLK,
2279 KNL_PCI_UNCORE_M2PCIE,
2280 KNL_PCI_UNCORE_IRP,
2281 };
2282
2283 static struct intel_uncore_type *knl_pci_uncores[] = {
2284 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2285 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2286 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2287 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2288 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2289 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2290 NULL,
2291 };
2292
2293 /*
2294 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2295  * device type. Prior to KNL, each instance of a PMU device type had a unique
2296 * device ID.
2297 *
2298 * PCI Device ID Uncore PMU Devices
2299 * ----------------------------------
2300 * 0x7841 MC0 UClk, MC1 UClk
2301 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2302 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2303 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2304 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2305 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2306 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2307 * 0x7817 M2PCIe
2308 * 0x7814 IRP
2309 */
2310
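/*
 * Instances are therefore told apart by PCI device/function rather than
 * device ID: in the table below, UNCORE_PCI_DEV_FULL_DATA(10, 0, ...)
 * binds the 0x7841 endpoint at device 10, function 0 to MC0 UClk
 * (pmu_idx 0), while the identical ID at device 11 becomes MC1 UClk.
 */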
2311 static const struct pci_device_id knl_uncore_pci_ids[] = {
2312 { /* MC0 UClk */
2313 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2314 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2315 },
2316 { /* MC1 UClk */
2317 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2318 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2319 },
2320 { /* MC0 DClk CH 0 */
2321 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2322 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2323 },
2324 { /* MC0 DClk CH 1 */
2325 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2326 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2327 },
2328 { /* MC0 DClk CH 2 */
2329 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2330 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2331 },
2332 { /* MC1 DClk CH 0 */
2333 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2334 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2335 },
2336 { /* MC1 DClk CH 1 */
2337 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2338 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2339 },
2340 { /* MC1 DClk CH 2 */
2341 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2342 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2343 },
2344 { /* EDC0 UClk */
2345 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2346 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2347 },
2348 { /* EDC1 UClk */
2349 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2350 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2351 },
2352 { /* EDC2 UClk */
2353 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2354 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2355 },
2356 { /* EDC3 UClk */
2357 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2358 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2359 },
2360 { /* EDC4 UClk */
2361 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2362 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2363 },
2364 { /* EDC5 UClk */
2365 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2366 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2367 },
2368 { /* EDC6 UClk */
2369 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2370 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2371 },
2372 { /* EDC7 UClk */
2373 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2374 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2375 },
2376 { /* EDC0 EClk */
2377 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2378 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2379 },
2380 { /* EDC1 EClk */
2381 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2382 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2383 },
2384 { /* EDC2 EClk */
2385 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2386 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2387 },
2388 { /* EDC3 EClk */
2389 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2390 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2391 },
2392 { /* EDC4 EClk */
2393 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2394 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2395 },
2396 { /* EDC5 EClk */
2397 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2398 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2399 },
2400 { /* EDC6 EClk */
2401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2402 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2403 },
2404 { /* EDC7 EClk */
2405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2406 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2407 },
2408 { /* M2PCIe */
2409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2410 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2411 },
2412 { /* IRP */
2413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2414 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2415 },
2416 { /* end: all zeroes */ }
2417 };
2418
2419 static struct pci_driver knl_uncore_pci_driver = {
2420 .name = "knl_uncore",
2421 .id_table = knl_uncore_pci_ids,
2422 };
2423
2424 int knl_uncore_pci_init(void)
2425 {
2426 int ret;
2427
2428         /* All KNL PCI-based PMON units are on the same PCI bus except IRP */
2429 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2430 if (ret)
2431 return ret;
2432 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2433 if (ret)
2434 return ret;
2435 uncore_pci_uncores = knl_pci_uncores;
2436 uncore_pci_driver = &knl_uncore_pci_driver;
2437 return 0;
2438 }
2439
2440 /* end of KNL uncore support */
2441
2442 /* Haswell-EP uncore support */
2443 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2444 &format_attr_event.attr,
2445 &format_attr_umask.attr,
2446 &format_attr_edge.attr,
2447 &format_attr_inv.attr,
2448 &format_attr_thresh5.attr,
2449 &format_attr_filter_tid2.attr,
2450 &format_attr_filter_cid.attr,
2451 NULL,
2452 };
2453
2454 static const struct attribute_group hswep_uncore_ubox_format_group = {
2455 .name = "format",
2456 .attrs = hswep_uncore_ubox_formats_attr,
2457 };
2458
2459 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2460 {
2461 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2462 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2463 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2464 reg1->idx = 0;
2465 return 0;
2466 }
2467
2468 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2469 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2470 .hw_config = hswep_ubox_hw_config,
2471 .get_constraint = uncore_get_constraint,
2472 .put_constraint = uncore_put_constraint,
2473 };
2474
2475 static struct intel_uncore_type hswep_uncore_ubox = {
2476 .name = "ubox",
2477 .num_counters = 2,
2478 .num_boxes = 1,
2479 .perf_ctr_bits = 44,
2480 .fixed_ctr_bits = 48,
2481 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2482 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2483 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2484 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2485 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2486 .num_shared_regs = 1,
2487 .ops = &hswep_uncore_ubox_ops,
2488 .format_group = &hswep_uncore_ubox_format_group,
2489 };
2490
2491 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2492 &format_attr_event.attr,
2493 &format_attr_umask.attr,
2494 &format_attr_edge.attr,
2495 &format_attr_tid_en.attr,
2496 &format_attr_thresh8.attr,
2497 &format_attr_filter_tid3.attr,
2498 &format_attr_filter_link2.attr,
2499 &format_attr_filter_state3.attr,
2500 &format_attr_filter_nid2.attr,
2501 &format_attr_filter_opc2.attr,
2502 &format_attr_filter_nc.attr,
2503 &format_attr_filter_c6.attr,
2504 &format_attr_filter_isoc.attr,
2505 NULL,
2506 };
2507
2508 static const struct attribute_group hswep_uncore_cbox_format_group = {
2509 .name = "format",
2510 .attrs = hswep_uncore_cbox_formats_attr,
2511 };
2512
2513 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2514 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2515 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2516 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2517 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2518 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2519 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2520 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2521 EVENT_CONSTRAINT_END
2522 };
2523
2524 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2525 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2526 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2527 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2528 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2529 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2530 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2531 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2532 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2533 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2534 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2535 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2536 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2537 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2538 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2539 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2540 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2541 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2542 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2543 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2544 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2545 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2546 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2547 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2548 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2549 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2550 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2551 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2552 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2553 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2554 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2555 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2556 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2558 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2559 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2560 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2561 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2562 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2563 EVENT_EXTRA_END
2564 };
2565
2566 static u64 hswep_cbox_filter_mask(int fields)
2567 {
2568 u64 mask = 0;
2569 if (fields & 0x1)
2570 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2571 if (fields & 0x2)
2572 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2573 if (fields & 0x4)
2574 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2575 if (fields & 0x8)
2576 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2577 if (fields & 0x10) {
2578 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2579 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2580 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2581 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2582 }
2583 return mask;
2584 }
2585
2586 static struct event_constraint *
2587 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2588 {
2589 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2590 }
2591
2592 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2593 {
2594 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2595 struct extra_reg *er;
2596 int idx = 0;
2597
2598 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2599 if (er->event != (event->hw.config & er->config_mask))
2600 continue;
2601 idx |= er->idx;
2602 }
2603
2604 if (idx) {
2605 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2606 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2607 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2608 reg1->idx = idx;
2609 }
2610 return 0;
2611 }
2612
2613 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2614 struct perf_event *event)
2615 {
2616 struct hw_perf_event *hwc = &event->hw;
2617 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2618
2619 if (reg1->idx != EXTRA_REG_NONE) {
2620 u64 filter = uncore_shared_reg_config(box, 0);
2621 wrmsrl(reg1->reg, filter & 0xffffffff);
2622 wrmsrl(reg1->reg + 1, filter >> 32);
2623 }
2624
2625 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2626 }
2627
2628 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2629 .init_box = snbep_uncore_msr_init_box,
2630 .disable_box = snbep_uncore_msr_disable_box,
2631 .enable_box = snbep_uncore_msr_enable_box,
2632 .disable_event = snbep_uncore_msr_disable_event,
2633 .enable_event = hswep_cbox_enable_event,
2634 .read_counter = uncore_msr_read_counter,
2635 .hw_config = hswep_cbox_hw_config,
2636 .get_constraint = hswep_cbox_get_constraint,
2637 .put_constraint = snbep_cbox_put_constraint,
2638 };
2639
2640 static struct intel_uncore_type hswep_uncore_cbox = {
2641 .name = "cbox",
2642 .num_counters = 4,
2643 .num_boxes = 18,
2644 .perf_ctr_bits = 48,
2645 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2646 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2647 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2648 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2649 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2650 .num_shared_regs = 1,
2651 .constraints = hswep_uncore_cbox_constraints,
2652 .ops = &hswep_uncore_cbox_ops,
2653 .format_group = &hswep_uncore_cbox_format_group,
2654 };
2655
2656 /*
2657 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2658 */
2659 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2660 {
2661 unsigned msr = uncore_msr_box_ctl(box);
2662
2663 if (msr) {
2664 u64 init = SNBEP_PMON_BOX_CTL_INT;
2665 u64 flags = 0;
2666 int i;
2667
2668 for_each_set_bit(i, (unsigned long *)&init, 64) {
2669 flags |= (1ULL << i);
2670 wrmsrl(msr, flags);
2671 }
2672 }
2673 }
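/*
 * With SNBEP_PMON_BOX_CTL_INT = RST_CTRL | RST_CTRS | FRZ_EN (bits 0,
 * 1 and 16), the loop above amounts to:
 *
 *	wrmsrl(msr, 0x00001);
 *	wrmsrl(msr, 0x00003);
 *	wrmsrl(msr, 0x10003);
 *
 * i.e. one new bit per write instead of a single combined write, which
 * is what sidesteps the spurious #GP.
 */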
2674
2675 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2676 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2677         .init_box               = hswep_uncore_sbox_msr_init_box,
2678 };
2679
2680 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2681 &format_attr_event.attr,
2682 &format_attr_umask.attr,
2683 &format_attr_edge.attr,
2684 &format_attr_tid_en.attr,
2685 &format_attr_inv.attr,
2686 &format_attr_thresh8.attr,
2687 NULL,
2688 };
2689
2690 static const struct attribute_group hswep_uncore_sbox_format_group = {
2691 .name = "format",
2692 .attrs = hswep_uncore_sbox_formats_attr,
2693 };
2694
2695 static struct intel_uncore_type hswep_uncore_sbox = {
2696 .name = "sbox",
2697 .num_counters = 4,
2698 .num_boxes = 4,
2699 .perf_ctr_bits = 44,
2700 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2701 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2702 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2703 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2704 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2705 .ops = &hswep_uncore_sbox_msr_ops,
2706 .format_group = &hswep_uncore_sbox_format_group,
2707 };
2708
2709 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2710 {
2711 struct hw_perf_event *hwc = &event->hw;
2712 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2713 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2714
2715 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2716 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2717 reg1->idx = ev_sel - 0xb;
2718 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2719 }
2720 return 0;
2721 }
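/*
 * PCU event selects 0xb-0xe are the occupancy/frequency-band events
 * (naming per the SNB-EP PCU convention; an inference, not spelled out
 * here).  Each claims one slot of the shared filter register, with the
 * band threshold taken from config1: e.g. ev_sel 0xd gives
 * reg1->idx = 2 and keeps config1 & (0xff << 2), exactly as computed
 * above.
 */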
2722
2723 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2724 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2725 .hw_config = hswep_pcu_hw_config,
2726 .get_constraint = snbep_pcu_get_constraint,
2727 .put_constraint = snbep_pcu_put_constraint,
2728 };
2729
2730 static struct intel_uncore_type hswep_uncore_pcu = {
2731 .name = "pcu",
2732 .num_counters = 4,
2733 .num_boxes = 1,
2734 .perf_ctr_bits = 48,
2735 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2736 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2737 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2738 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2739 .num_shared_regs = 1,
2740 .ops = &hswep_uncore_pcu_ops,
2741 .format_group = &snbep_uncore_pcu_format_group,
2742 };
2743
2744 static struct intel_uncore_type *hswep_msr_uncores[] = {
2745 &hswep_uncore_ubox,
2746 &hswep_uncore_cbox,
2747 &hswep_uncore_sbox,
2748 &hswep_uncore_pcu,
2749 NULL,
2750 };
2751
2752 #define HSWEP_PCU_DID 0x2fc0
2753 #define HSWEP_PCU_CAPID4_OFFSET 0x94
2754 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
2755
2756 static bool hswep_has_limit_sbox(unsigned int device)
2757 {
2758 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2759 u32 capid4;
2760
2761 if (!dev)
2762 return false;
2763
2764         pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
2765 if (!hswep_get_chop(capid4))
2766 return true;
2767
2768 return false;
2769 }
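/*
 * CAPID4 bits [7:6] encode the die "chop", i.e. which core-count
 * variant this is (our reading of the macro above); a chop of 0 is the
 * smallest variant, which only carries two SBOXes, so the function
 * returns true and the caller trims sbox.num_boxes to match.
 */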
2770
2771 void hswep_uncore_cpu_init(void)
2772 {
2773 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2774 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2775
2776 /* Detect 6-8 core systems with only two SBOXes */
2777 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2778 hswep_uncore_sbox.num_boxes = 2;
2779
2780 uncore_msr_uncores = hswep_msr_uncores;
2781 }
2782
2783 static struct intel_uncore_type hswep_uncore_ha = {
2784 .name = "ha",
2785 .num_counters = 4,
2786 .num_boxes = 2,
2787 .perf_ctr_bits = 48,
2788 SNBEP_UNCORE_PCI_COMMON_INIT(),
2789 };
2790
2791 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2792 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2793 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2794 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2795 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2796 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2797 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2798 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2799 { /* end: all zeroes */ },
2800 };
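/*
 * The 6.103515625e-5 scale factor is 64 / 2^20: every CAS_COUNT event
 * moves one 64-byte cache line, so scaling the raw count by 64/1048576
 * reports read/write volume directly in MiB, matching the "MiB" unit
 * strings above.
 */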
2801
2802 static struct intel_uncore_type hswep_uncore_imc = {
2803 .name = "imc",
2804 .num_counters = 4,
2805 .num_boxes = 8,
2806 .perf_ctr_bits = 48,
2807 .fixed_ctr_bits = 48,
2808 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2809 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2810 .event_descs = hswep_uncore_imc_events,
2811 SNBEP_UNCORE_PCI_COMMON_INIT(),
2812 };
2813
2814 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2815
2816 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2817 {
2818 struct pci_dev *pdev = box->pci_dev;
2819 struct hw_perf_event *hwc = &event->hw;
2820 u64 count = 0;
2821
2822 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2823 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2824
2825 return count;
2826 }
2827
2828 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2829 .init_box = snbep_uncore_pci_init_box,
2830 .disable_box = snbep_uncore_pci_disable_box,
2831 .enable_box = snbep_uncore_pci_enable_box,
2832 .disable_event = ivbep_uncore_irp_disable_event,
2833 .enable_event = ivbep_uncore_irp_enable_event,
2834 .read_counter = hswep_uncore_irp_read_counter,
2835 };
2836
2837 static struct intel_uncore_type hswep_uncore_irp = {
2838 .name = "irp",
2839 .num_counters = 4,
2840 .num_boxes = 1,
2841 .perf_ctr_bits = 48,
2842 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2843 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2844 .ops = &hswep_uncore_irp_ops,
2845 .format_group = &snbep_uncore_format_group,
2846 };
2847
2848 static struct intel_uncore_type hswep_uncore_qpi = {
2849 .name = "qpi",
2850 .num_counters = 4,
2851 .num_boxes = 3,
2852 .perf_ctr_bits = 48,
2853 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2854 .event_ctl = SNBEP_PCI_PMON_CTL0,
2855 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2856 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2857 .num_shared_regs = 1,
2858 .ops = &snbep_uncore_qpi_ops,
2859 .format_group = &snbep_uncore_qpi_format_group,
2860 };
2861
2862 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2863 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2864 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2865 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2866 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2867 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2868 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2869 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2870 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2871 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2872 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2873 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2874 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2875 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2876 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2877 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2878 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2879 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2880 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2881 EVENT_CONSTRAINT_END
2882 };
2883
2884 static struct intel_uncore_type hswep_uncore_r2pcie = {
2885 .name = "r2pcie",
2886 .num_counters = 4,
2887 .num_boxes = 1,
2888 .perf_ctr_bits = 48,
2889 .constraints = hswep_uncore_r2pcie_constraints,
2890 SNBEP_UNCORE_PCI_COMMON_INIT(),
2891 };
2892
2893 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2894 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2895 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2896 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2897 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2898 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2899 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2900 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2901 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2902 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2903 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2904 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2905 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2906 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2907 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2908 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2909 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2910 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2911 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2912 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2913 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2914 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2915 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2916 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2917 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2918 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2919 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2920 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2921 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2922 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2923 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2924 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2925 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2926 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2927 EVENT_CONSTRAINT_END
2928 };
2929
2930 static struct intel_uncore_type hswep_uncore_r3qpi = {
2931 .name = "r3qpi",
2932 .num_counters = 3,
2933 .num_boxes = 3,
2934 .perf_ctr_bits = 44,
2935 .constraints = hswep_uncore_r3qpi_constraints,
2936 SNBEP_UNCORE_PCI_COMMON_INIT(),
2937 };
2938
2939 enum {
2940 HSWEP_PCI_UNCORE_HA,
2941 HSWEP_PCI_UNCORE_IMC,
2942 HSWEP_PCI_UNCORE_IRP,
2943 HSWEP_PCI_UNCORE_QPI,
2944 HSWEP_PCI_UNCORE_R2PCIE,
2945 HSWEP_PCI_UNCORE_R3QPI,
2946 };
2947
2948 static struct intel_uncore_type *hswep_pci_uncores[] = {
2949 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
2950 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
2951 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
2952 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
2953 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
2954 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
2955 NULL,
2956 };
2957
2958 static const struct pci_device_id hswep_uncore_pci_ids[] = {
2959 { /* Home Agent 0 */
2960 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
2961 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
2962 },
2963 { /* Home Agent 1 */
2964 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
2965 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
2966 },
2967 { /* MC0 Channel 0 */
2968 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
2969 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
2970 },
2971 { /* MC0 Channel 1 */
2972 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
2973 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
2974 },
2975 { /* MC0 Channel 2 */
2976 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
2977 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
2978 },
2979 { /* MC0 Channel 3 */
2980 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
2981 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
2982 },
2983 { /* MC1 Channel 0 */
2984 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
2985 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
2986 },
2987 { /* MC1 Channel 1 */
2988 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
2989 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
2990 },
2991 { /* MC1 Channel 2 */
2992 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
2993 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
2994 },
2995 { /* MC1 Channel 3 */
2996 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
2997 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
2998 },
2999 { /* IRP */
3000 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3001 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3002 },
3003 { /* QPI0 Port 0 */
3004 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3005 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3006 },
3007 { /* QPI0 Port 1 */
3008 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3009 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3010 },
3011 { /* QPI1 Port 2 */
3012 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3013 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3014 },
3015 { /* R2PCIe */
3016 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3017 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3018 },
3019 { /* R3QPI0 Link 0 */
3020 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3021 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3022 },
3023 { /* R3QPI0 Link 1 */
3024 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3025 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3026 },
3027 { /* R3QPI1 Link 2 */
3028 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3029 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3030 },
3031 { /* QPI Port 0 filter */
3032 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3033 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3034 SNBEP_PCI_QPI_PORT0_FILTER),
3035 },
3036 { /* QPI Port 1 filter */
3037 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3038 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3039 SNBEP_PCI_QPI_PORT1_FILTER),
3040 },
3041 { /* end: all zeroes */ }
3042 };
3043
3044 static struct pci_driver hswep_uncore_pci_driver = {
3045 .name = "hswep_uncore",
3046 .id_table = hswep_uncore_pci_ids,
3047 };
3048
3049 int hswep_uncore_pci_init(void)
3050 {
3051 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3052 if (ret)
3053 return ret;
3054 uncore_pci_uncores = hswep_pci_uncores;
3055 uncore_pci_driver = &hswep_uncore_pci_driver;
3056 return 0;
3057 }
3058 /* end of Haswell-EP uncore support */
3059
3060 /* BDX uncore support */
3061
3062 static struct intel_uncore_type bdx_uncore_ubox = {
3063 .name = "ubox",
3064 .num_counters = 2,
3065 .num_boxes = 1,
3066 .perf_ctr_bits = 48,
3067 .fixed_ctr_bits = 48,
3068 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3069 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3070 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3071 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3072 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3073 .num_shared_regs = 1,
3074 .ops = &ivbep_uncore_msr_ops,
3075 .format_group = &ivbep_uncore_ubox_format_group,
3076 };
3077
3078 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3079 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3080 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3081 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3082 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3083 EVENT_CONSTRAINT_END
3084 };
3085
3086 static struct intel_uncore_type bdx_uncore_cbox = {
3087 .name = "cbox",
3088 .num_counters = 4,
3089 .num_boxes = 24,
3090 .perf_ctr_bits = 48,
3091 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3092 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3093 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3094 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3095 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3096 .num_shared_regs = 1,
3097 .constraints = bdx_uncore_cbox_constraints,
3098 .ops = &hswep_uncore_cbox_ops,
3099 .format_group = &hswep_uncore_cbox_format_group,
3100 };
3101
3102 static struct intel_uncore_type bdx_uncore_sbox = {
3103 .name = "sbox",
3104 .num_counters = 4,
3105 .num_boxes = 4,
3106 .perf_ctr_bits = 48,
3107 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3108 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3109 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3110 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3111 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3112 .ops = &hswep_uncore_sbox_msr_ops,
3113 .format_group = &hswep_uncore_sbox_format_group,
3114 };
3115
3116 #define BDX_MSR_UNCORE_SBOX 3
3117
3118 static struct intel_uncore_type *bdx_msr_uncores[] = {
3119 &bdx_uncore_ubox,
3120 &bdx_uncore_cbox,
3121 &hswep_uncore_pcu,
3122 &bdx_uncore_sbox,
3123 NULL,
3124 };
3125
3126 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3127 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3128 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3129 EVENT_CONSTRAINT_END
3130 };
3131
3132 #define BDX_PCU_DID 0x6fc0
3133
3134 void bdx_uncore_cpu_init(void)
3135 {
3136 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3137 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3138 uncore_msr_uncores = bdx_msr_uncores;
3139
3140 /* Detect systems with no SBOXes */
3141 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3142 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3143
3144 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3145 }
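/*
 * Removing the SBOX relies on ordering: bdx_msr_uncores is
 * NULL-terminated and the SBOX entry is deliberately last
 * (BDX_MSR_UNCORE_SBOX == 3), so writing NULL into that slot truncates
 * the list right after the PCU without disturbing the other box types.
 */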
3146
3147 static struct intel_uncore_type bdx_uncore_ha = {
3148 .name = "ha",
3149 .num_counters = 4,
3150 .num_boxes = 2,
3151 .perf_ctr_bits = 48,
3152 SNBEP_UNCORE_PCI_COMMON_INIT(),
3153 };
3154
3155 static struct intel_uncore_type bdx_uncore_imc = {
3156 .name = "imc",
3157 .num_counters = 4,
3158 .num_boxes = 8,
3159 .perf_ctr_bits = 48,
3160 .fixed_ctr_bits = 48,
3161 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3162 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3163 .event_descs = hswep_uncore_imc_events,
3164 SNBEP_UNCORE_PCI_COMMON_INIT(),
3165 };
3166
3167 static struct intel_uncore_type bdx_uncore_irp = {
3168 .name = "irp",
3169 .num_counters = 4,
3170 .num_boxes = 1,
3171 .perf_ctr_bits = 48,
3172 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3173 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3174 .ops = &hswep_uncore_irp_ops,
3175 .format_group = &snbep_uncore_format_group,
3176 };
3177
3178 static struct intel_uncore_type bdx_uncore_qpi = {
3179 .name = "qpi",
3180 .num_counters = 4,
3181 .num_boxes = 3,
3182 .perf_ctr_bits = 48,
3183 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3184 .event_ctl = SNBEP_PCI_PMON_CTL0,
3185 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3186 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3187 .num_shared_regs = 1,
3188 .ops = &snbep_uncore_qpi_ops,
3189 .format_group = &snbep_uncore_qpi_format_group,
3190 };
3191
3192 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3193 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3194 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3195 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3196 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3197 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3198 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3199 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3200 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3201 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3202 EVENT_CONSTRAINT_END
3203 };
3204
3205 static struct intel_uncore_type bdx_uncore_r2pcie = {
3206 .name = "r2pcie",
3207 .num_counters = 4,
3208 .num_boxes = 1,
3209 .perf_ctr_bits = 48,
3210 .constraints = bdx_uncore_r2pcie_constraints,
3211 SNBEP_UNCORE_PCI_COMMON_INIT(),
3212 };
3213
3214 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3215 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3216 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3217 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3218 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3219 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3220 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3221 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3222 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3223 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3224 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3225 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3226 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3227 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3228 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3229 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3230 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3231 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3232 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3233 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3234 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3235 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3236 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3237 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3238 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3239 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3240 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3241 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3242 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3243 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3244 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3245 EVENT_CONSTRAINT_END
3246 };
3247
3248 static struct intel_uncore_type bdx_uncore_r3qpi = {
3249 .name = "r3qpi",
3250 .num_counters = 3,
3251 .num_boxes = 3,
3252 .perf_ctr_bits = 48,
3253 .constraints = bdx_uncore_r3qpi_constraints,
3254 SNBEP_UNCORE_PCI_COMMON_INIT(),
3255 };
3256
3257 enum {
3258 BDX_PCI_UNCORE_HA,
3259 BDX_PCI_UNCORE_IMC,
3260 BDX_PCI_UNCORE_IRP,
3261 BDX_PCI_UNCORE_QPI,
3262 BDX_PCI_UNCORE_R2PCIE,
3263 BDX_PCI_UNCORE_R3QPI,
3264 };
3265
3266 static struct intel_uncore_type *bdx_pci_uncores[] = {
3267 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3268 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3269 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3270 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3271 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3272 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3273 NULL,
3274 };
3275
3276 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3277 { /* Home Agent 0 */
3278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3279 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3280 },
3281 { /* Home Agent 1 */
3282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3283 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3284 },
3285 { /* MC0 Channel 0 */
3286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3287 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3288 },
3289 { /* MC0 Channel 1 */
3290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3291 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3292 },
3293 { /* MC0 Channel 2 */
3294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3295 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3296 },
3297 { /* MC0 Channel 3 */
3298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3299 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3300 },
3301 { /* MC1 Channel 0 */
3302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3303 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3304 },
3305 { /* MC1 Channel 1 */
3306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3307 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3308 },
3309 { /* MC1 Channel 2 */
3310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3311 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3312 },
3313 { /* MC1 Channel 3 */
3314 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3315 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3316 },
3317 { /* IRP */
3318 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3319 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3320 },
3321 { /* QPI0 Port 0 */
3322 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3323 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3324 },
3325 { /* QPI0 Port 1 */
3326 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3327 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3328 },
3329 { /* QPI1 Port 2 */
3330 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3331 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3332 },
3333 { /* R2PCIe */
3334 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3335 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3336 },
3337 { /* R3QPI0 Link 0 */
3338 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3339 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3340 },
3341 { /* R3QPI0 Link 1 */
3342 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3343 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3344 },
3345 { /* R3QPI1 Link 2 */
3346 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3347 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3348 },
3349 { /* QPI Port 0 filter */
3350 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3351 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3352 SNBEP_PCI_QPI_PORT0_FILTER),
3353 },
3354 { /* QPI Port 1 filter */
3355 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3356 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3357 SNBEP_PCI_QPI_PORT1_FILTER),
3358 },
3359 { /* QPI Port 2 filter */
3360 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3361 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3362 BDX_PCI_QPI_PORT2_FILTER),
3363 },
3364 { /* end: all zeroes */ }
3365 };
3366
3367 static struct pci_driver bdx_uncore_pci_driver = {
3368 .name = "bdx_uncore",
3369 .id_table = bdx_uncore_pci_ids,
3370 };
3371
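/*
 * Build the PCI-bus-to-socket mapping by reading the SNBEP_CPUNODEID and
 * SNBEP_GIDNIDMAP config registers of device 0x6f1e (presumably the BDX
 * Ubox device, by analogy with the SNR init below).
 */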
3372 int bdx_uncore_pci_init(void)
3373 {
3374 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3375
3376 if (ret)
3377 return ret;
3378 uncore_pci_uncores = bdx_pci_uncores;
3379 uncore_pci_driver = &bdx_uncore_pci_driver;
3380 return 0;
3381 }
3382
3383 /* end of BDX uncore support */
3384
3385 /* SKX uncore support */
3386
3387 static struct intel_uncore_type skx_uncore_ubox = {
3388 .name = "ubox",
3389 .num_counters = 2,
3390 .num_boxes = 1,
3391 .perf_ctr_bits = 48,
3392 .fixed_ctr_bits = 48,
3393 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3394 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3395 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3396 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3397 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3398 .ops = &ivbep_uncore_msr_ops,
3399 .format_group = &ivbep_uncore_ubox_format_group,
3400 };
3401
3402 static struct attribute *skx_uncore_cha_formats_attr[] = {
3403 &format_attr_event.attr,
3404 &format_attr_umask.attr,
3405 &format_attr_edge.attr,
3406 &format_attr_tid_en.attr,
3407 &format_attr_inv.attr,
3408 &format_attr_thresh8.attr,
3409 &format_attr_filter_tid4.attr,
3410 &format_attr_filter_state5.attr,
3411 &format_attr_filter_rem.attr,
3412 &format_attr_filter_loc.attr,
3413 &format_attr_filter_nm.attr,
3414 &format_attr_filter_all_op.attr,
3415 &format_attr_filter_not_nm.attr,
3416 &format_attr_filter_opc_0.attr,
3417 &format_attr_filter_opc_1.attr,
3418 &format_attr_filter_nc.attr,
3419 &format_attr_filter_isoc.attr,
3420 NULL,
3421 };
3422
3423 static const struct attribute_group skx_uncore_chabox_format_group = {
3424 .name = "format",
3425 .attrs = skx_uncore_cha_formats_attr,
3426 };
3427
3428 static struct event_constraint skx_uncore_chabox_constraints[] = {
3429 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3430 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3431 EVENT_CONSTRAINT_END
3432 };
3433
3434 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3435 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3436 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3437 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3438 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3439 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3440 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3441 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3442 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3443 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3444 EVENT_EXTRA_END
3445 };
3446
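/*
 * Translate a filter-field bitmap (bit 0: TID, bit 1: LINK, bit 2: STATE,
 * bit 3: all remaining opcode/locality/NC/ISOC fields) into the
 * corresponding CHA filter-register mask.
 */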
3447 static u64 skx_cha_filter_mask(int fields)
3448 {
3449 u64 mask = 0;
3450
3451 if (fields & 0x1)
3452 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3453 if (fields & 0x2)
3454 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3455 if (fields & 0x4)
3456 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3457 if (fields & 0x8) {
3458 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3459 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3460 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3461 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3462 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3463 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3464 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3465 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3466 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3467 }
3468 return mask;
3469 }
3470
3471 static struct event_constraint *
3472 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3473 {
3474 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3475 }
3476
3477 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3478 {
3479 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3480 struct extra_reg *er;
3481 int idx = 0;
3482 /* Any of the CHA events may be filtered by Thread/Core-ID. */
3483 if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3484 idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3485
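/* OR in the filter fields required by any matching extra-reg event. */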
3486 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3487 if (er->event != (event->hw.config & er->config_mask))
3488 continue;
3489 idx |= er->idx;
3490 }
3491
3492 if (idx) {
3493 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3494 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3495 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3496 reg1->idx = idx;
3497 }
3498 return 0;
3499 }
3500
3501 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3502 /* There is no frz_en for chabox ctl */
3503 .init_box = ivbep_uncore_msr_init_box,
3504 .disable_box = snbep_uncore_msr_disable_box,
3505 .enable_box = snbep_uncore_msr_enable_box,
3506 .disable_event = snbep_uncore_msr_disable_event,
3507 .enable_event = hswep_cbox_enable_event,
3508 .read_counter = uncore_msr_read_counter,
3509 .hw_config = skx_cha_hw_config,
3510 .get_constraint = skx_cha_get_constraint,
3511 .put_constraint = snbep_cbox_put_constraint,
3512 };
3513
3514 static struct intel_uncore_type skx_uncore_chabox = {
3515 .name = "cha",
3516 .num_counters = 4,
3517 .perf_ctr_bits = 48,
3518 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3519 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3520 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3521 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3522 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3523 .num_shared_regs = 1,
3524 .constraints = skx_uncore_chabox_constraints,
3525 .ops = &skx_uncore_chabox_ops,
3526 .format_group = &skx_uncore_chabox_format_group,
3527 };
3528
3529 static struct attribute *skx_uncore_iio_formats_attr[] = {
3530 &format_attr_event.attr,
3531 &format_attr_umask.attr,
3532 &format_attr_edge.attr,
3533 &format_attr_inv.attr,
3534 &format_attr_thresh9.attr,
3535 &format_attr_ch_mask.attr,
3536 &format_attr_fc_mask.attr,
3537 NULL,
3538 };
3539
3540 static const struct attribute_group skx_uncore_iio_format_group = {
3541 .name = "format",
3542 .attrs = skx_uncore_iio_formats_attr,
3543 };
3544
3545 static struct event_constraint skx_uncore_iio_constraints[] = {
3546 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3547 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3548 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3549 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3550 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3551 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3552 UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3553 EVENT_CONSTRAINT_END
3554 };
3555
3556 static void skx_iio_enable_event(struct intel_uncore_box *box,
3557 struct perf_event *event)
3558 {
3559 struct hw_perf_event *hwc = &event->hw;
3560
3561 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3562 }
3563
3564 static struct intel_uncore_ops skx_uncore_iio_ops = {
3565 .init_box = ivbep_uncore_msr_init_box,
3566 .disable_box = snbep_uncore_msr_disable_box,
3567 .enable_box = snbep_uncore_msr_enable_box,
3568 .disable_event = snbep_uncore_msr_disable_event,
3569 .enable_event = skx_iio_enable_event,
3570 .read_counter = uncore_msr_read_counter,
3571 };
3572
3573 static struct intel_uncore_type skx_uncore_iio = {
3574 .name = "iio",
3575 .num_counters = 4,
3576 .num_boxes = 6,
3577 .perf_ctr_bits = 48,
3578 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3579 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
3580 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
3581 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3582 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
3583 .msr_offset = SKX_IIO_MSR_OFFSET,
3584 .constraints = skx_uncore_iio_constraints,
3585 .ops = &skx_uncore_iio_ops,
3586 .format_group = &skx_uncore_iio_format_group,
3587 };
3588
3589 enum perf_uncore_iio_freerunning_type_id {
3590 SKX_IIO_MSR_IOCLK = 0,
3591 SKX_IIO_MSR_BW = 1,
3592 SKX_IIO_MSR_UTIL = 2,
3593
3594 SKX_IIO_FREERUNNING_TYPE_MAX,
3595 };
3596
3597
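/*
 * Entries are { counter_base, counter_offset, box_offset, num_counters,
 * bits }, matching the field order of struct freerunning_counters.
 */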
3598 static struct freerunning_counters skx_iio_freerunning[] = {
3599 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3600 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3601 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
3602 };
3603
3604 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3605 /* Free-Running IIO CLOCKS Counter */
3606 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
3607 /* Free-Running IIO BANDWIDTH Counters */
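/* The 3.814697266e-6 scale is 4/2^20: each count is 4 bytes, reported in MiB. */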
3608 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
3609 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
3610 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
3611 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
3612 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
3613 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
3614 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
3615 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
3616 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
3617 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
3618 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
3619 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
3620 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
3621 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
3622 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
3623 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
3624 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
3625 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
3626 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
3627 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
3628 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
3629 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
3630 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
3631 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
3632 /* Free-Running IIO UTILIZATION Counters */
3633 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
3634 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
3635 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
3636 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
3637 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
3638 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
3639 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
3640 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
3641 { /* end: all zeroes */ },
3642 };
3643
3644 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3645 .read_counter = uncore_msr_read_counter,
3646 .hw_config = uncore_freerunning_hw_config,
3647 };
3648
3649 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3650 &format_attr_event.attr,
3651 &format_attr_umask.attr,
3652 NULL,
3653 };
3654
3655 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3656 .name = "format",
3657 .attrs = skx_uncore_iio_freerunning_formats_attr,
3658 };
3659
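/* 17 free-running counters per box: 1 ioclk + 8 bandwidth + 8 utilization. */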
3660 static struct intel_uncore_type skx_uncore_iio_free_running = {
3661 .name = "iio_free_running",
3662 .num_counters = 17,
3663 .num_boxes = 6,
3664 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
3665 .freerunning = skx_iio_freerunning,
3666 .ops = &skx_uncore_iio_freerunning_ops,
3667 .event_descs = skx_uncore_iio_freerunning_events,
3668 .format_group = &skx_uncore_iio_freerunning_format_group,
3669 };
3670
3671 static struct attribute *skx_uncore_formats_attr[] = {
3672 &format_attr_event.attr,
3673 &format_attr_umask.attr,
3674 &format_attr_edge.attr,
3675 &format_attr_inv.attr,
3676 &format_attr_thresh8.attr,
3677 NULL,
3678 };
3679
3680 static const struct attribute_group skx_uncore_format_group = {
3681 .name = "format",
3682 .attrs = skx_uncore_formats_attr,
3683 };
3684
3685 static struct intel_uncore_type skx_uncore_irp = {
3686 .name = "irp",
3687 .num_counters = 2,
3688 .num_boxes = 6,
3689 .perf_ctr_bits = 48,
3690 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3691 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
3692 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3693 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
3694 .msr_offset = SKX_IRP_MSR_OFFSET,
3695 .ops = &skx_uncore_iio_ops,
3696 .format_group = &skx_uncore_format_group,
3697 };
3698
3699 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3700 &format_attr_event.attr,
3701 &format_attr_umask.attr,
3702 &format_attr_edge.attr,
3703 &format_attr_inv.attr,
3704 &format_attr_thresh8.attr,
3705 &format_attr_occ_invert.attr,
3706 &format_attr_occ_edge_det.attr,
3707 &format_attr_filter_band0.attr,
3708 &format_attr_filter_band1.attr,
3709 &format_attr_filter_band2.attr,
3710 &format_attr_filter_band3.attr,
3711 NULL,
3712 };
3713
3714 static struct attribute_group skx_uncore_pcu_format_group = {
3715 .name = "format",
3716 .attrs = skx_uncore_pcu_formats_attr,
3717 };
3718
3719 static struct intel_uncore_ops skx_uncore_pcu_ops = {
3720 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3721 .hw_config = hswep_pcu_hw_config,
3722 .get_constraint = snbep_pcu_get_constraint,
3723 .put_constraint = snbep_pcu_put_constraint,
3724 };
3725
3726 static struct intel_uncore_type skx_uncore_pcu = {
3727 .name = "pcu",
3728 .num_counters = 4,
3729 .num_boxes = 1,
3730 .perf_ctr_bits = 48,
3731 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
3732 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
3733 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
3734 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
3735 .num_shared_regs = 1,
3736 .ops = &skx_uncore_pcu_ops,
3737 .format_group = &skx_uncore_pcu_format_group,
3738 };
3739
3740 static struct intel_uncore_type *skx_msr_uncores[] = {
3741 &skx_uncore_ubox,
3742 &skx_uncore_chabox,
3743 &skx_uncore_iio,
3744 &skx_uncore_iio_free_running,
3745 &skx_uncore_irp,
3746 &skx_uncore_pcu,
3747 NULL,
3748 };
3749
3750 /*
3751 * To determine the number of CHAs, read bits 27:0 of the CAPID6 register at
3752 * Device 30, Function 3, Offset 0x9C (PCI ID 0x2083); each set bit is one CHA.
3753 */
3754 #define SKX_CAPID6 0x9c
3755 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
3756
3757 static int skx_count_chabox(void)
3758 {
3759 struct pci_dev *dev = NULL;
3760 u32 val = 0;
3761
3762 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3763 if (!dev)
3764 goto out;
3765
3766 pci_read_config_dword(dev, SKX_CAPID6, &val);
3767 val &= SKX_CHA_BIT_MASK;
3768 out:
3769 pci_dev_put(dev);
3770 return hweight32(val);
3771 }
3772
3773 void skx_uncore_cpu_init(void)
3774 {
3775 skx_uncore_chabox.num_boxes = skx_count_chabox();
3776 uncore_msr_uncores = skx_msr_uncores;
3777 }
3778
3779 static struct intel_uncore_type skx_uncore_imc = {
3780 .name = "imc",
3781 .num_counters = 4,
3782 .num_boxes = 6,
3783 .perf_ctr_bits = 48,
3784 .fixed_ctr_bits = 48,
3785 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3786 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3787 .event_descs = hswep_uncore_imc_events,
3788 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3789 .event_ctl = SNBEP_PCI_PMON_CTL0,
3790 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3791 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3792 .ops = &ivbep_uncore_pci_ops,
3793 .format_group = &skx_uncore_format_group,
3794 };
3795
3796 static struct attribute *skx_upi_uncore_formats_attr[] = {
3797 &format_attr_event.attr,
3798 &format_attr_umask_ext.attr,
3799 &format_attr_edge.attr,
3800 &format_attr_inv.attr,
3801 &format_attr_thresh8.attr,
3802 NULL,
3803 };
3804
3805 static const struct attribute_group skx_upi_uncore_format_group = {
3806 .name = "format",
3807 .attrs = skx_upi_uncore_formats_attr,
3808 };
3809
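/*
 * UNCORE_BOX_FLAG_CTL_OFFS8 flags that the event control registers are
 * 8 bytes apart; writing IVBEP_PMON_BOX_CTL_INT resets the box.
 */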
3810 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3811 {
3812 struct pci_dev *pdev = box->pci_dev;
3813
3814 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3815 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3816 }
3817
3818 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
3819 .init_box = skx_upi_uncore_pci_init_box,
3820 .disable_box = snbep_uncore_pci_disable_box,
3821 .enable_box = snbep_uncore_pci_enable_box,
3822 .disable_event = snbep_uncore_pci_disable_event,
3823 .enable_event = snbep_uncore_pci_enable_event,
3824 .read_counter = snbep_uncore_pci_read_counter,
3825 };
3826
3827 static struct intel_uncore_type skx_uncore_upi = {
3828 .name = "upi",
3829 .num_counters = 4,
3830 .num_boxes = 3,
3831 .perf_ctr_bits = 48,
3832 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
3833 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
3834 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3835 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
3836 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
3837 .ops = &skx_upi_uncore_pci_ops,
3838 .format_group = &skx_upi_uncore_format_group,
3839 };
3840
3841 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3842 {
3843 struct pci_dev *pdev = box->pci_dev;
3844
3845 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3846 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3847 }
3848
3849 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
3850 .init_box = skx_m2m_uncore_pci_init_box,
3851 .disable_box = snbep_uncore_pci_disable_box,
3852 .enable_box = snbep_uncore_pci_enable_box,
3853 .disable_event = snbep_uncore_pci_disable_event,
3854 .enable_event = snbep_uncore_pci_enable_event,
3855 .read_counter = snbep_uncore_pci_read_counter,
3856 };
3857
3858 static struct intel_uncore_type skx_uncore_m2m = {
3859 .name = "m2m",
3860 .num_counters = 4,
3861 .num_boxes = 2,
3862 .perf_ctr_bits = 48,
3863 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
3864 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
3865 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3866 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
3867 .ops = &skx_m2m_uncore_pci_ops,
3868 .format_group = &skx_uncore_format_group,
3869 };
3870
3871 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
3872 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3873 EVENT_CONSTRAINT_END
3874 };
3875
3876 static struct intel_uncore_type skx_uncore_m2pcie = {
3877 .name = "m2pcie",
3878 .num_counters = 4,
3879 .num_boxes = 4,
3880 .perf_ctr_bits = 48,
3881 .constraints = skx_uncore_m2pcie_constraints,
3882 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3883 .event_ctl = SNBEP_PCI_PMON_CTL0,
3884 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3885 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3886 .ops = &ivbep_uncore_pci_ops,
3887 .format_group = &skx_uncore_format_group,
3888 };
3889
3890 static struct event_constraint skx_uncore_m3upi_constraints[] = {
3891 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
3892 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
3893 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
3894 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
3895 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
3896 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
3897 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
3898 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
3899 EVENT_CONSTRAINT_END
3900 };
3901
3902 static struct intel_uncore_type skx_uncore_m3upi = {
3903 .name = "m3upi",
3904 .num_counters = 3,
3905 .num_boxes = 3,
3906 .perf_ctr_bits = 48,
3907 .constraints = skx_uncore_m3upi_constraints,
3908 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3909 .event_ctl = SNBEP_PCI_PMON_CTL0,
3910 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3911 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3912 .ops = &ivbep_uncore_pci_ops,
3913 .format_group = &skx_uncore_format_group,
3914 };
3915
3916 enum {
3917 SKX_PCI_UNCORE_IMC,
3918 SKX_PCI_UNCORE_M2M,
3919 SKX_PCI_UNCORE_UPI,
3920 SKX_PCI_UNCORE_M2PCIE,
3921 SKX_PCI_UNCORE_M3UPI,
3922 };
3923
3924 static struct intel_uncore_type *skx_pci_uncores[] = {
3925 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
3926 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
3927 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
3928 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
3929 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
3930 NULL,
3931 };
3932
3933 static const struct pci_device_id skx_uncore_pci_ids[] = {
3934 { /* MC0 Channel 0 */
3935 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3936 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
3937 },
3938 { /* MC0 Channel 1 */
3939 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3940 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
3941 },
3942 { /* MC0 Channel 2 */
3943 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3944 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
3945 },
3946 { /* MC1 Channel 0 */
3947 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3948 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
3949 },
3950 { /* MC1 Channel 1 */
3951 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3952 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
3953 },
3954 { /* MC1 Channel 2 */
3955 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3956 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
3957 },
3958 { /* M2M0 */
3959 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3960 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
3961 },
3962 { /* M2M1 */
3963 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3964 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
3965 },
3966 { /* UPI0 Link 0 */
3967 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3968 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
3969 },
3970 { /* UPI0 Link 1 */
3971 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3972 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
3973 },
3974 { /* UPI1 Link 2 */
3975 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3976 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
3977 },
3978 { /* M2PCIe 0 */
3979 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3980 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
3981 },
3982 { /* M2PCIe 1 */
3983 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3984 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
3985 },
3986 { /* M2PCIe 2 */
3987 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3988 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
3989 },
3990 { /* M2PCIe 3 */
3991 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3992 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
3993 },
3994 { /* M3UPI0 Link 0 */
3995 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
3996 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
3997 },
3998 { /* M3UPI0 Link 1 */
3999 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4000 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4001 },
4002 { /* M3UPI1 Link 2 */
4003 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4004 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4005 },
4006 { /* end: all zeroes */ }
4007 };
4008
4009
4010 static struct pci_driver skx_uncore_pci_driver = {
4011 .name = "skx_uncore",
4012 .id_table = skx_uncore_pci_ids,
4013 };
4014
4015 int skx_uncore_pci_init(void)
4016 {
4017 /* need to double check pci address */
4018 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4019
4020 if (ret)
4021 return ret;
4022
4023 uncore_pci_uncores = skx_pci_uncores;
4024 uncore_pci_driver = &skx_uncore_pci_driver;
4025 return 0;
4026 }
4027
4028 /* end of SKX uncore support */
4029
4030 /* SNR uncore support */
4031
4032 static struct intel_uncore_type snr_uncore_ubox = {
4033 .name = "ubox",
4034 .num_counters = 2,
4035 .num_boxes = 1,
4036 .perf_ctr_bits = 48,
4037 .fixed_ctr_bits = 48,
4038 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4039 .event_ctl = SNR_U_MSR_PMON_CTL0,
4040 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4041 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4042 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4043 .ops = &ivbep_uncore_msr_ops,
4044 .format_group = &ivbep_uncore_format_group,
4045 };
4046
4047 static struct attribute *snr_uncore_cha_formats_attr[] = {
4048 &format_attr_event.attr,
4049 &format_attr_umask_ext2.attr,
4050 &format_attr_edge.attr,
4051 &format_attr_tid_en.attr,
4052 &format_attr_inv.attr,
4053 &format_attr_thresh8.attr,
4054 &format_attr_filter_tid5.attr,
4055 NULL,
4056 };
4057 static const struct attribute_group snr_uncore_chabox_format_group = {
4058 .name = "format",
4059 .attrs = snr_uncore_cha_formats_attr,
4060 };
4061
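/*
 * SNR CHAs use a single per-box filter register; only the TID field of
 * config1 is used.
 */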
4062 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4063 {
4064 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4065
4066 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4067 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4068 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4069 reg1->idx = 0;
4070
4071 return 0;
4072 }
4073
4074 static void snr_cha_enable_event(struct intel_uncore_box *box,
4075 struct perf_event *event)
4076 {
4077 struct hw_perf_event *hwc = &event->hw;
4078 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4079
4080 if (reg1->idx != EXTRA_REG_NONE)
4081 wrmsrl(reg1->reg, reg1->config);
4082
4083 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4084 }
4085
4086 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4087 .init_box = ivbep_uncore_msr_init_box,
4088 .disable_box = snbep_uncore_msr_disable_box,
4089 .enable_box = snbep_uncore_msr_enable_box,
4090 .disable_event = snbep_uncore_msr_disable_event,
4091 .enable_event = snr_cha_enable_event,
4092 .read_counter = uncore_msr_read_counter,
4093 .hw_config = snr_cha_hw_config,
4094 };
4095
4096 static struct intel_uncore_type snr_uncore_chabox = {
4097 .name = "cha",
4098 .num_counters = 4,
4099 .num_boxes = 6,
4100 .perf_ctr_bits = 48,
4101 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4102 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4103 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4104 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4105 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4106 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4107 .ops = &snr_uncore_chabox_ops,
4108 .format_group = &snr_uncore_chabox_format_group,
4109 };
4110
4111 static struct attribute *snr_uncore_iio_formats_attr[] = {
4112 &format_attr_event.attr,
4113 &format_attr_umask.attr,
4114 &format_attr_edge.attr,
4115 &format_attr_inv.attr,
4116 &format_attr_thresh9.attr,
4117 &format_attr_ch_mask2.attr,
4118 &format_attr_fc_mask2.attr,
4119 NULL,
4120 };
4121
4122 static const struct attribute_group snr_uncore_iio_format_group = {
4123 .name = "format",
4124 .attrs = snr_uncore_iio_formats_attr,
4125 };
4126
4127 static struct intel_uncore_type snr_uncore_iio = {
4128 .name = "iio",
4129 .num_counters = 4,
4130 .num_boxes = 5,
4131 .perf_ctr_bits = 48,
4132 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4133 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4134 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4135 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4136 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4137 .msr_offset = SNR_IIO_MSR_OFFSET,
4138 .ops = &ivbep_uncore_msr_ops,
4139 .format_group = &snr_uncore_iio_format_group,
4140 };
4141
4142 static struct intel_uncore_type snr_uncore_irp = {
4143 .name = "irp",
4144 .num_counters = 2,
4145 .num_boxes = 5,
4146 .perf_ctr_bits = 48,
4147 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4148 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4149 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4150 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4151 .msr_offset = SNR_IRP_MSR_OFFSET,
4152 .ops = &ivbep_uncore_msr_ops,
4153 .format_group = &ivbep_uncore_format_group,
4154 };
4155
4156 static struct intel_uncore_type snr_uncore_m2pcie = {
4157 .name = "m2pcie",
4158 .num_counters = 4,
4159 .num_boxes = 5,
4160 .perf_ctr_bits = 48,
4161 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4162 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4163 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4164 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4165 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4166 .ops = &ivbep_uncore_msr_ops,
4167 .format_group = &ivbep_uncore_format_group,
4168 };
4169
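/*
 * PCU band events (0xb-0xe) take their threshold from config1 via the
 * shared SNR_PCU_MSR_PMON_BOX_FILTER register.
 */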
4170 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4171 {
4172 struct hw_perf_event *hwc = &event->hw;
4173 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4174 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4175
4176 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4177 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4178 reg1->idx = ev_sel - 0xb;
4179 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4180 }
4181 return 0;
4182 }
4183
4184 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4185 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4186 .hw_config = snr_pcu_hw_config,
4187 .get_constraint = snbep_pcu_get_constraint,
4188 .put_constraint = snbep_pcu_put_constraint,
4189 };
4190
4191 static struct intel_uncore_type snr_uncore_pcu = {
4192 .name = "pcu",
4193 .num_counters = 4,
4194 .num_boxes = 1,
4195 .perf_ctr_bits = 48,
4196 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4197 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4198 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4199 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4200 .num_shared_regs = 1,
4201 .ops = &snr_uncore_pcu_ops,
4202 .format_group = &skx_uncore_pcu_format_group,
4203 };
4204
4205 enum perf_uncore_snr_iio_freerunning_type_id {
4206 SNR_IIO_MSR_IOCLK,
4207 SNR_IIO_MSR_BW_IN,
4208
4209 SNR_IIO_FREERUNNING_TYPE_MAX,
4210 };
4211
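/* Same field layout as skx_iio_freerunning above; SNR counters are 48 bits wide. */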
4212 static struct freerunning_counters snr_iio_freerunning[] = {
4213 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4214 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4215 };
4216
4217 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4218 /* Free-Running IIO CLOCKS Counter */
4219 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4220 /* Free-Running IIO BANDWIDTH IN Counters */
4221 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4222 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4223 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4224 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4225 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4226 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4227 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4228 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4229 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4230 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4231 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4232 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4233 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4234 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4235 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4236 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4237 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4238 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4239 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4240 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4241 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4242 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4243 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4244 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4245 { /* end: all zeroes */ },
4246 };
4247
4248 static struct intel_uncore_type snr_uncore_iio_free_running = {
4249 .name = "iio_free_running",
4250 .num_counters = 9,
4251 .num_boxes = 5,
4252 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4253 .freerunning = snr_iio_freerunning,
4254 .ops = &skx_uncore_iio_freerunning_ops,
4255 .event_descs = snr_uncore_iio_freerunning_events,
4256 .format_group = &skx_uncore_iio_freerunning_format_group,
4257 };
4258
4259 static struct intel_uncore_type *snr_msr_uncores[] = {
4260 &snr_uncore_ubox,
4261 &snr_uncore_chabox,
4262 &snr_uncore_iio,
4263 &snr_uncore_irp,
4264 &snr_uncore_m2pcie,
4265 &snr_uncore_pcu,
4266 &snr_uncore_iio_free_running,
4267 NULL,
4268 };
4269
4270 void snr_uncore_cpu_init(void)
4271 {
4272 uncore_msr_uncores = snr_msr_uncores;
4273 }
4274
4275 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4276 {
4277 struct pci_dev *pdev = box->pci_dev;
4278 int box_ctl = uncore_pci_box_ctl(box);
4279
4280 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4281 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4282 }
4283
4284 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4285 .init_box = snr_m2m_uncore_pci_init_box,
4286 .disable_box = snbep_uncore_pci_disable_box,
4287 .enable_box = snbep_uncore_pci_enable_box,
4288 .disable_event = snbep_uncore_pci_disable_event,
4289 .enable_event = snbep_uncore_pci_enable_event,
4290 .read_counter = snbep_uncore_pci_read_counter,
4291 };
4292
4293 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4294 &format_attr_event.attr,
4295 &format_attr_umask_ext3.attr,
4296 &format_attr_edge.attr,
4297 &format_attr_inv.attr,
4298 &format_attr_thresh8.attr,
4299 NULL,
4300 };
4301
4302 static const struct attribute_group snr_m2m_uncore_format_group = {
4303 .name = "format",
4304 .attrs = snr_m2m_uncore_formats_attr,
4305 };
4306
4307 static struct intel_uncore_type snr_uncore_m2m = {
4308 .name = "m2m",
4309 .num_counters = 4,
4310 .num_boxes = 1,
4311 .perf_ctr_bits = 48,
4312 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4313 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4314 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4315 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4316 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4317 .ops = &snr_m2m_uncore_pci_ops,
4318 .format_group = &snr_m2m_uncore_format_group,
4319 };
4320
4321 enum {
4322 SNR_PCI_UNCORE_M2M,
4323 };
4324
4325 static struct intel_uncore_type *snr_pci_uncores[] = {
4326 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4327 NULL,
4328 };
4329
4330 static const struct pci_device_id snr_uncore_pci_ids[] = {
4331 { /* M2M */
4332 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4333 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4334 },
4335 { /* end: all zeroes */ }
4336 };
4337
4338 static struct pci_driver snr_uncore_pci_driver = {
4339 .name = "snr_uncore",
4340 .id_table = snr_uncore_pci_ids,
4341 };
4342
4343 int snr_uncore_pci_init(void)
4344 {
4345 /* SNR UBOX DID */
4346 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4347 SKX_GIDNIDMAP, true);
4348
4349 if (ret)
4350 return ret;
4351
4352 uncore_pci_uncores = snr_pci_uncores;
4353 uncore_pci_driver = &snr_uncore_pci_driver;
4354 return 0;
4355 }
4356
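/*
 * Walk the SNR memory-controller devices (DID 0x3451) and return the one
 * whose bus maps to the requested logical package.
 */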
4357 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4358 {
4359 struct pci_dev *mc_dev = NULL;
4360 int phys_id, pkg;
4361
4362 while (1) {
4363 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4364 if (!mc_dev)
4365 break;
4366 phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4367 if (phys_id < 0)
4368 continue;
4369 pkg = topology_phys_to_logical_pkg(phys_id);
4370 if (pkg < 0)
4371 continue;
4372 else if (pkg == id)
4373 break;
4374 }
4375 return mc_dev;
4376 }
4377
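/*
 * The SNR IMC PMON registers sit in MMIO space: the base address is
 * assembled from two PCI config registers of the MC device (BASE bits
 * shifted left by 23, MEM0 bits by 12) and then ioremap()ed.
 */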
4378 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4379 {
4380 struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4381 unsigned int box_ctl = uncore_mmio_box_ctl(box);
4382 resource_size_t addr;
4383 u32 pci_dword;
4384
4385 if (!pdev)
4386 return;
4387
4388 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4389 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4390
4391 pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
4392 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4393
4394 addr += box_ctl;
4395
4396 box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
4397 if (!box->io_addr)
4398 return;
4399
4400 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4401 }
4402
4403 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4404 {
4405 u32 config;
4406
4407 if (!box->io_addr)
4408 return;
4409
4410 config = readl(box->io_addr);
4411 config |= SNBEP_PMON_BOX_CTL_FRZ;
4412 writel(config, box->io_addr);
4413 }
4414
4415 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4416 {
4417 u32 config;
4418
4419 if (!box->io_addr)
4420 return;
4421
4422 config = readl(box->io_addr);
4423 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4424 writel(config, box->io_addr);
4425 }
4426
4427 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4428 struct perf_event *event)
4429 {
4430 struct hw_perf_event *hwc = &event->hw;
4431
4432 if (!box->io_addr)
4433 return;
4434
4435 writel(hwc->config | SNBEP_PMON_CTL_EN,
4436 box->io_addr + hwc->config_base);
4437 }
4438
4439 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4440 struct perf_event *event)
4441 {
4442 struct hw_perf_event *hwc = &event->hw;
4443
4444 if (!box->io_addr)
4445 return;
4446
4447 writel(hwc->config, box->io_addr + hwc->config_base);
4448 }
4449
4450 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4451 .init_box = snr_uncore_mmio_init_box,
4452 .exit_box = uncore_mmio_exit_box,
4453 .disable_box = snr_uncore_mmio_disable_box,
4454 .enable_box = snr_uncore_mmio_enable_box,
4455 .disable_event = snr_uncore_mmio_disable_event,
4456 .enable_event = snr_uncore_mmio_enable_event,
4457 .read_counter = uncore_mmio_read_counter,
4458 };
4459
4460 static struct uncore_event_desc snr_uncore_imc_events[] = {
4461 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
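/* The 6.103515625e-5 scale is 64/2^20: one 64-byte cache line per CAS count, in MiB. */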
4462 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4463 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4464 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4465 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4466 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4467 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4468 { /* end: all zeroes */ },
4469 };
4470
4471 static struct intel_uncore_type snr_uncore_imc = {
4472 .name = "imc",
4473 .num_counters = 4,
4474 .num_boxes = 2,
4475 .perf_ctr_bits = 48,
4476 .fixed_ctr_bits = 48,
4477 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4478 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4479 .event_descs = snr_uncore_imc_events,
4480 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4481 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4482 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4483 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4484 .mmio_offset = SNR_IMC_MMIO_OFFSET,
4485 .ops = &snr_uncore_mmio_ops,
4486 .format_group = &skx_uncore_format_group,
4487 };
4488
4489 enum perf_uncore_snr_imc_freerunning_type_id {
4490 SNR_IMC_DCLK,
4491 SNR_IMC_DDR,
4492
4493 SNR_IMC_FREERUNNING_TYPE_MAX,
4494 };
4495
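/*
 * DCLK: one 48-bit counter at 0x22b0. DDR: two 48-bit counters at a 0x8
 * stride from 0x2290 (read and write, per the 0x20/0x21 umasks).
 */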
4496 static struct freerunning_counters snr_imc_freerunning[] = {
4497 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
4498 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
4499 };
4500
4501 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4502 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4503
4504 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4505 INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
4506 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4507 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4508 INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
4509 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4510 { /* end: all zeroes */ },
4511 };
4512
4513 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4514 .init_box = snr_uncore_mmio_init_box,
4515 .exit_box = uncore_mmio_exit_box,
4516 .read_counter = uncore_mmio_read_counter,
4517 .hw_config = uncore_freerunning_hw_config,
4518 };
4519
4520 static struct intel_uncore_type snr_uncore_imc_free_running = {
4521 .name = "imc_free_running",
4522 .num_counters = 3,
4523 .num_boxes = 1,
4524 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
4525 .freerunning = snr_imc_freerunning,
4526 .ops = &snr_uncore_imc_freerunning_ops,
4527 .event_descs = snr_uncore_imc_freerunning_events,
4528 .format_group = &skx_uncore_iio_freerunning_format_group,
4529 };
4530
4531 static struct intel_uncore_type *snr_mmio_uncores[] = {
4532 &snr_uncore_imc,
4533 &snr_uncore_imc_free_running,
4534 NULL,
4535 };
4536
4537 void snr_uncore_mmio_init(void)
4538 {
4539 uncore_mmio_uncores = snr_mmio_uncores;
4540 }
4541
4542 /* end of SNR uncore support */