]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - arch/x86/events/intel/uncore_snbep.c
perf/x86/intel/uncore: Add PCI ID of IMC for Xeon E3 V5 Family
[mirror_ubuntu-jammy-kernel.git] / arch / x86 / events / intel / uncore_snbep.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
8268fdfc 2/* SandyBridge-EP/IvyTown uncore support */
ed367e6c 3#include "uncore.h"
8268fdfc 4
68ce4a0d
KL
5/* SNB-EP pci bus to socket mapping */
6#define SNBEP_CPUNODEID 0x40
7#define SNBEP_GIDNIDMAP 0x54
8
8268fdfc
YZ
9/* SNB-EP Box level control */
10#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
11#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
12#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
13#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
14#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
15 SNBEP_PMON_BOX_CTL_RST_CTRS | \
16 SNBEP_PMON_BOX_CTL_FRZ_EN)
17/* SNB-EP event control */
18#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
19#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
20#define SNBEP_PMON_CTL_RST (1 << 17)
21#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
22#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
23#define SNBEP_PMON_CTL_EN (1 << 22)
24#define SNBEP_PMON_CTL_INVERT (1 << 23)
25#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
26#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
27 SNBEP_PMON_CTL_UMASK_MASK | \
28 SNBEP_PMON_CTL_EDGE_DET | \
29 SNBEP_PMON_CTL_INVERT | \
30 SNBEP_PMON_CTL_TRESH_MASK)
31
32/* SNB-EP Ubox event control */
33#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
34#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
35 (SNBEP_PMON_CTL_EV_SEL_MASK | \
36 SNBEP_PMON_CTL_UMASK_MASK | \
37 SNBEP_PMON_CTL_EDGE_DET | \
38 SNBEP_PMON_CTL_INVERT | \
39 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40
41#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
42#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
43 SNBEP_CBO_PMON_CTL_TID_EN)
44
45/* SNB-EP PCU event control */
46#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
47#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
48#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
49#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
50#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
51 (SNBEP_PMON_CTL_EV_SEL_MASK | \
52 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53 SNBEP_PMON_CTL_EDGE_DET | \
8268fdfc
YZ
54 SNBEP_PMON_CTL_INVERT | \
55 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58
59#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
60 (SNBEP_PMON_RAW_EVENT_MASK | \
61 SNBEP_PMON_CTL_EV_SEL_EXT)
62
63/* SNB-EP pci control register */
64#define SNBEP_PCI_PMON_BOX_CTL 0xf4
65#define SNBEP_PCI_PMON_CTL0 0xd8
66/* SNB-EP pci counter register */
67#define SNBEP_PCI_PMON_CTR0 0xa0
68
69/* SNB-EP home agent register */
70#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
71#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
72#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
73/* SNB-EP memory controller register */
74#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
75#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
76/* SNB-EP QPI register */
77#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
78#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
79#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
80#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
81
82/* SNB-EP Ubox register */
83#define SNBEP_U_MSR_PMON_CTR0 0xc16
84#define SNBEP_U_MSR_PMON_CTL0 0xc10
85
86#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
87#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
88
89/* SNB-EP Cbo register */
90#define SNBEP_C0_MSR_PMON_CTR0 0xd16
91#define SNBEP_C0_MSR_PMON_CTL0 0xd10
92#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
93#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
94#define SNBEP_CBO_MSR_OFFSET 0x20
95
96#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
97#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
98#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
99#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
100
101#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
102 .event = (e), \
103 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
104 .config_mask = (m), \
105 .idx = (i) \
106}
107
108/* SNB-EP PCU register */
109#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
110#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
111#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
112#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
113#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
114#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
115#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
116
ddcd0973
PZ
117/* IVBEP event control */
118#define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
8268fdfc 119 SNBEP_PMON_BOX_CTL_RST_CTRS)
ddcd0973 120#define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
8268fdfc
YZ
121 SNBEP_PMON_CTL_UMASK_MASK | \
122 SNBEP_PMON_CTL_EDGE_DET | \
123 SNBEP_PMON_CTL_TRESH_MASK)
ddcd0973
PZ
124/* IVBEP Ubox */
125#define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
126#define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
127#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
8268fdfc 128
ddcd0973 129#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
8268fdfc
YZ
130 (SNBEP_PMON_CTL_EV_SEL_MASK | \
131 SNBEP_PMON_CTL_UMASK_MASK | \
132 SNBEP_PMON_CTL_EDGE_DET | \
133 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
ddcd0973
PZ
134/* IVBEP Cbo */
135#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
8268fdfc
YZ
136 SNBEP_CBO_PMON_CTL_TID_EN)
137
ddcd0973
PZ
138#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
139#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
140#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
141#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
142#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
143#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
144#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
7e96ae1a 145#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
ddcd0973
PZ
146
147/* IVBEP home agent */
148#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
149#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
150 (IVBEP_PMON_RAW_EVENT_MASK | \
151 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152/* IVBEP PCU */
153#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
8268fdfc 154 (SNBEP_PMON_CTL_EV_SEL_MASK | \
8268fdfc
YZ
155 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156 SNBEP_PMON_CTL_EDGE_DET | \
157 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
ddcd0973
PZ
160/* IVBEP QPI */
161#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
162 (IVBEP_PMON_RAW_EVENT_MASK | \
8268fdfc
YZ
163 SNBEP_PMON_CTL_EV_SEL_EXT)
164
165#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
166 ((1ULL << (n)) - 1)))
167
e735b9db 168/* Haswell-EP Ubox */
8cf1a3de
KL
169#define HSWEP_U_MSR_PMON_CTR0 0x709
170#define HSWEP_U_MSR_PMON_CTL0 0x705
e735b9db
YZ
171#define HSWEP_U_MSR_PMON_FILTER 0x707
172
173#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
174#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
175
176#define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
177#define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
178#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181
182/* Haswell-EP CBo */
183#define HSWEP_C0_MSR_PMON_CTR0 0xe08
184#define HSWEP_C0_MSR_PMON_CTL0 0xe01
185#define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
186#define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
187#define HSWEP_CBO_MSR_OFFSET 0x10
188
189
190#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
191#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
192#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
193#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
194#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
195#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
196#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
197#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
198
199
200/* Haswell-EP Sbox */
201#define HSWEP_S0_MSR_PMON_CTR0 0x726
202#define HSWEP_S0_MSR_PMON_CTL0 0x721
203#define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
204#define HSWEP_SBOX_MSR_OFFSET 0xa
205#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
206 SNBEP_CBO_PMON_CTL_TID_EN)
207
208/* Haswell-EP PCU */
209#define HSWEP_PCU_MSR_PMON_CTR0 0x717
210#define HSWEP_PCU_MSR_PMON_CTL0 0x711
211#define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
212#define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
213
77af0037
HC
214/* KNL Ubox */
215#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217 SNBEP_CBO_PMON_CTL_TID_EN)
218/* KNL CHA */
219#define KNL_CHA_MSR_OFFSET 0xc
220#define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
221#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223 KNL_CHA_MSR_PMON_CTL_QOR)
224#define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
225#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
226#define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
ec336c87 227#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
228#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
229#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)
77af0037
HC
230
231/* KNL EDC/MC UCLK */
232#define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
233#define KNL_UCLK_MSR_PMON_CTL0 0x420
234#define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
235#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
236#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
237#define KNL_PMON_FIXED_CTL_EN 0x1
238
239/* KNL EDC */
240#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
241#define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
242#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
243#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
244#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44
245
246/* KNL MC */
247#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
248#define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
249#define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
250#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
251#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44
252
253/* KNL IRP */
254#define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
255#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
256 KNL_CHA_MSR_PMON_CTL_QOR)
257/* KNL PCU */
258#define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
259#define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
260#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
261#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
262 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265 SNBEP_PMON_CTL_EDGE_DET | \
266 SNBEP_CBO_PMON_CTL_TID_EN | \
77af0037
HC
267 SNBEP_PMON_CTL_INVERT | \
268 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
e735b9db 271
cd34cd97
KL
272/* SKX pci bus to socket mapping */
273#define SKX_CPUNODEID 0xc0
274#define SKX_GIDNIDMAP 0xd4
275
276/* SKX CHA */
277#define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
278#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
279#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
280#define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
281#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
282#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
283#define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
284#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
285#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
286#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
287#define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
288#define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
289#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
290
291/* SKX IIO */
292#define SKX_IIO0_MSR_PMON_CTL0 0xa48
293#define SKX_IIO0_MSR_PMON_CTR0 0xa41
294#define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
295#define SKX_IIO_MSR_OFFSET 0x20
296
297#define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
298#define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
299#define SKX_PMON_CTL_CH_MASK (0xff << 4)
300#define SKX_PMON_CTL_FC_MASK (0x7 << 12)
301#define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
302 SNBEP_PMON_CTL_UMASK_MASK | \
303 SNBEP_PMON_CTL_EDGE_DET | \
304 SNBEP_PMON_CTL_INVERT | \
305 SKX_PMON_CTL_TRESH_MASK)
306#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
307 SKX_PMON_CTL_CH_MASK | \
308 SKX_PMON_CTL_FC_MASK)
309
310/* SKX IRP */
311#define SKX_IRP0_MSR_PMON_CTL0 0xa5b
312#define SKX_IRP0_MSR_PMON_CTR0 0xa59
313#define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
314#define SKX_IRP_MSR_OFFSET 0x20
315
316/* SKX UPI */
317#define SKX_UPI_PCI_PMON_CTL0 0x350
318#define SKX_UPI_PCI_PMON_CTR0 0x318
319#define SKX_UPI_PCI_PMON_BOX_CTL 0x378
b3625980 320#define SKX_UPI_CTL_UMASK_EXT 0xffefff
cd34cd97
KL
321
322/* SKX M2M */
323#define SKX_M2M_PCI_PMON_CTL0 0x228
324#define SKX_M2M_PCI_PMON_CTR0 0x200
325#define SKX_M2M_PCI_PMON_BOX_CTL 0x258
326
210cc5f9
KL
327/* SNR Ubox */
328#define SNR_U_MSR_PMON_CTR0 0x1f98
329#define SNR_U_MSR_PMON_CTL0 0x1f91
330#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
331#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
332
333/* SNR CHA */
334#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
335#define SNR_CHA_MSR_PMON_CTL0 0x1c01
336#define SNR_CHA_MSR_PMON_CTR0 0x1c08
337#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
338#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
339
340
341/* SNR IIO */
342#define SNR_IIO_MSR_PMON_CTL0 0x1e08
343#define SNR_IIO_MSR_PMON_CTR0 0x1e01
344#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
345#define SNR_IIO_MSR_OFFSET 0x10
346#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
347
348/* SNR IRP */
349#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
350#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
351#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
352#define SNR_IRP_MSR_OFFSET 0x10
353
354/* SNR M2PCIE */
355#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
356#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
357#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
358#define SNR_M2PCIE_MSR_OFFSET 0x10
359
360/* SNR PCU */
361#define SNR_PCU_MSR_PMON_CTL0 0x1ef1
362#define SNR_PCU_MSR_PMON_CTR0 0x1ef8
363#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
364#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
365
366/* SNR M2M */
367#define SNR_M2M_PCI_PMON_CTL0 0x468
368#define SNR_M2M_PCI_PMON_CTR0 0x440
369#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
370#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
371
372/* SNR PCIE3 */
373#define SNR_PCIE3_PCI_PMON_CTL0 0x508
374#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
375#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4
376
ee49532b
KL
377/* SNR IMC */
378#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
379#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
380#define SNR_IMC_MMIO_PMON_CTL0 0x40
381#define SNR_IMC_MMIO_PMON_CTR0 0x8
382#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
383#define SNR_IMC_MMIO_OFFSET 0x4000
384#define SNR_IMC_MMIO_SIZE 0x4000
385#define SNR_IMC_MMIO_BASE_OFFSET 0xd0
386#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
387#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
388#define SNR_IMC_MMIO_MEM0_MASK 0x7FF
389
8268fdfc 390DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
77af0037 391DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
8268fdfc 392DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
77af0037 393DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
8268fdfc 394DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
b3625980 395DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
210cc5f9
KL
396DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
397DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
77af0037 398DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
8268fdfc
YZ
399DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
400DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
401DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
cd34cd97 402DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
8268fdfc 403DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
77af0037 404DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
8268fdfc
YZ
405DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
406DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
407DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
408DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
77af0037 409DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
cd34cd97 410DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
210cc5f9 411DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
cd34cd97 412DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
210cc5f9 413DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
8268fdfc 414DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
e735b9db
YZ
415DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
416DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
77af0037 417DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
210cc5f9 418DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
e735b9db 419DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
8268fdfc 420DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
e735b9db 421DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
77af0037 422DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
8268fdfc
YZ
423DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
424DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
425DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
426DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
e735b9db 427DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
77af0037 428DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
cd34cd97
KL
429DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
430DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
431DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
432DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
433DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
77af0037
HC
434DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
435DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
436DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
8268fdfc
YZ
437DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
438DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
77af0037 439DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
cd34cd97
KL
440DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
441DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
e735b9db
YZ
442DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
443DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
444DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
8268fdfc
YZ
445DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
446DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
447DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
448DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
449DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
450DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
451DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
452DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
453DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
454DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
455DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
456DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
457DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
458DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
459DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
460DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
461DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
462DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
463DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
464DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
465DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
466DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
467
468static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
469{
470 struct pci_dev *pdev = box->pci_dev;
471 int box_ctl = uncore_pci_box_ctl(box);
472 u32 config = 0;
473
474 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
475 config |= SNBEP_PMON_BOX_CTL_FRZ;
476 pci_write_config_dword(pdev, box_ctl, config);
477 }
478}
479
480static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
481{
482 struct pci_dev *pdev = box->pci_dev;
483 int box_ctl = uncore_pci_box_ctl(box);
484 u32 config = 0;
485
486 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
487 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
488 pci_write_config_dword(pdev, box_ctl, config);
489 }
490}
491
492static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
493{
494 struct pci_dev *pdev = box->pci_dev;
495 struct hw_perf_event *hwc = &event->hw;
496
497 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
498}
499
500static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
501{
502 struct pci_dev *pdev = box->pci_dev;
503 struct hw_perf_event *hwc = &event->hw;
504
505 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
506}
507
508static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
509{
510 struct pci_dev *pdev = box->pci_dev;
511 struct hw_perf_event *hwc = &event->hw;
512 u64 count = 0;
513
514 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
515 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
516
517 return count;
518}
519
520static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
521{
522 struct pci_dev *pdev = box->pci_dev;
dae25530 523 int box_ctl = uncore_pci_box_ctl(box);
8268fdfc 524
dae25530 525 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
8268fdfc
YZ
526}
527
528static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
529{
530 u64 config;
531 unsigned msr;
532
533 msr = uncore_msr_box_ctl(box);
534 if (msr) {
535 rdmsrl(msr, config);
536 config |= SNBEP_PMON_BOX_CTL_FRZ;
537 wrmsrl(msr, config);
538 }
539}
540
541static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
542{
543 u64 config;
544 unsigned msr;
545
546 msr = uncore_msr_box_ctl(box);
547 if (msr) {
548 rdmsrl(msr, config);
549 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
550 wrmsrl(msr, config);
551 }
552}
553
554static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
555{
556 struct hw_perf_event *hwc = &event->hw;
557 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
558
559 if (reg1->idx != EXTRA_REG_NONE)
560 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
561
562 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
563}
564
565static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
566 struct perf_event *event)
567{
568 struct hw_perf_event *hwc = &event->hw;
569
570 wrmsrl(hwc->config_base, hwc->config);
571}
572
573static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
574{
575 unsigned msr = uncore_msr_box_ctl(box);
576
577 if (msr)
578 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
579}
580
581static struct attribute *snbep_uncore_formats_attr[] = {
582 &format_attr_event.attr,
583 &format_attr_umask.attr,
584 &format_attr_edge.attr,
585 &format_attr_inv.attr,
586 &format_attr_thresh8.attr,
587 NULL,
588};
589
590static struct attribute *snbep_uncore_ubox_formats_attr[] = {
591 &format_attr_event.attr,
592 &format_attr_umask.attr,
593 &format_attr_edge.attr,
594 &format_attr_inv.attr,
595 &format_attr_thresh5.attr,
596 NULL,
597};
598
599static struct attribute *snbep_uncore_cbox_formats_attr[] = {
600 &format_attr_event.attr,
601 &format_attr_umask.attr,
602 &format_attr_edge.attr,
603 &format_attr_tid_en.attr,
604 &format_attr_inv.attr,
605 &format_attr_thresh8.attr,
606 &format_attr_filter_tid.attr,
607 &format_attr_filter_nid.attr,
608 &format_attr_filter_state.attr,
609 &format_attr_filter_opc.attr,
610 NULL,
611};
612
613static struct attribute *snbep_uncore_pcu_formats_attr[] = {
cb225252 614 &format_attr_event.attr,
8268fdfc
YZ
615 &format_attr_occ_sel.attr,
616 &format_attr_edge.attr,
617 &format_attr_inv.attr,
618 &format_attr_thresh5.attr,
619 &format_attr_occ_invert.attr,
620 &format_attr_occ_edge.attr,
621 &format_attr_filter_band0.attr,
622 &format_attr_filter_band1.attr,
623 &format_attr_filter_band2.attr,
624 &format_attr_filter_band3.attr,
625 NULL,
626};
627
628static struct attribute *snbep_uncore_qpi_formats_attr[] = {
629 &format_attr_event_ext.attr,
630 &format_attr_umask.attr,
631 &format_attr_edge.attr,
632 &format_attr_inv.attr,
633 &format_attr_thresh8.attr,
634 &format_attr_match_rds.attr,
635 &format_attr_match_rnid30.attr,
636 &format_attr_match_rnid4.attr,
637 &format_attr_match_dnid.attr,
638 &format_attr_match_mc.attr,
639 &format_attr_match_opc.attr,
640 &format_attr_match_vnw.attr,
641 &format_attr_match0.attr,
642 &format_attr_match1.attr,
643 &format_attr_mask_rds.attr,
644 &format_attr_mask_rnid30.attr,
645 &format_attr_mask_rnid4.attr,
646 &format_attr_mask_dnid.attr,
647 &format_attr_mask_mc.attr,
648 &format_attr_mask_opc.attr,
649 &format_attr_mask_vnw.attr,
650 &format_attr_mask0.attr,
651 &format_attr_mask1.attr,
652 NULL,
653};
654
655static struct uncore_event_desc snbep_uncore_imc_events[] = {
656 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
657 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
c0737ce4
AK
658 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
659 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
8268fdfc 660 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
c0737ce4
AK
661 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
662 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
8268fdfc
YZ
663 { /* end: all zeroes */ },
664};
665
666static struct uncore_event_desc snbep_uncore_qpi_events[] = {
667 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
668 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
669 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
670 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
671 { /* end: all zeroes */ },
672};
673
45bd07ad 674static const struct attribute_group snbep_uncore_format_group = {
8268fdfc
YZ
675 .name = "format",
676 .attrs = snbep_uncore_formats_attr,
677};
678
45bd07ad 679static const struct attribute_group snbep_uncore_ubox_format_group = {
8268fdfc
YZ
680 .name = "format",
681 .attrs = snbep_uncore_ubox_formats_attr,
682};
683
45bd07ad 684static const struct attribute_group snbep_uncore_cbox_format_group = {
8268fdfc
YZ
685 .name = "format",
686 .attrs = snbep_uncore_cbox_formats_attr,
687};
688
45bd07ad 689static const struct attribute_group snbep_uncore_pcu_format_group = {
8268fdfc
YZ
690 .name = "format",
691 .attrs = snbep_uncore_pcu_formats_attr,
692};
693
45bd07ad 694static const struct attribute_group snbep_uncore_qpi_format_group = {
8268fdfc
YZ
695 .name = "format",
696 .attrs = snbep_uncore_qpi_formats_attr,
697};
698
68055915 699#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
8268fdfc
YZ
700 .disable_box = snbep_uncore_msr_disable_box, \
701 .enable_box = snbep_uncore_msr_enable_box, \
702 .disable_event = snbep_uncore_msr_disable_event, \
703 .enable_event = snbep_uncore_msr_enable_event, \
704 .read_counter = uncore_msr_read_counter
705
68055915
AK
706#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
707 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
708 .init_box = snbep_uncore_msr_init_box \
709
8268fdfc
YZ
710static struct intel_uncore_ops snbep_uncore_msr_ops = {
711 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
712};
713
714#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
715 .init_box = snbep_uncore_pci_init_box, \
716 .disable_box = snbep_uncore_pci_disable_box, \
717 .enable_box = snbep_uncore_pci_enable_box, \
718 .disable_event = snbep_uncore_pci_disable_event, \
719 .read_counter = snbep_uncore_pci_read_counter
720
721static struct intel_uncore_ops snbep_uncore_pci_ops = {
722 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
723 .enable_event = snbep_uncore_pci_enable_event, \
724};
725
726static struct event_constraint snbep_uncore_cbox_constraints[] = {
727 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
728 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
729 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
730 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
731 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
732 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
733 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
734 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
735 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
736 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
737 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
738 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
739 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
1134c2b5 740 UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
8268fdfc
YZ
741 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
742 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
743 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
744 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
745 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
746 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
747 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
748 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
749 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
750 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
751 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
752 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
753 EVENT_CONSTRAINT_END
754};
755
756static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
757 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
758 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
759 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
760 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
761 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
762 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
763 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
764 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
765 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
766 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
767 EVENT_CONSTRAINT_END
768};
769
770static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
771 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
772 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
773 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
774 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
775 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
776 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
777 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
778 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
779 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
780 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
781 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
782 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
783 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
784 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
785 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
786 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
787 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
788 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
789 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
790 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
791 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
792 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
793 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
794 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
795 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
796 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
797 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
798 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
799 EVENT_CONSTRAINT_END
800};
801
/*
 * SNB-EP Ubox: two general-purpose 44-bit MSR counters plus one 48-bit
 * fixed counter (uncore clock), all programmed through MSRs.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
816
/*
 * Cbox events that require the box filter MSR.  Each entry matches an
 * event/umask encoding against ->config_mask and records in ->idx a
 * bitmap of the filter fields the event consumes (see
 * snbep_cbox_filter_mask(): 0x1=tid, 0x2=nid, 0x4=state, 0x8=opc).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
845
846static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
847{
848 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
849 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
850 int i;
851
852 if (uncore_box_is_fake(box))
853 return;
854
855 for (i = 0; i < 5; i++) {
856 if (reg1->alloc & (0x1 << i))
857 atomic_sub(1 << (i * 6), &er->ref);
858 }
859 reg1->alloc = 0;
860}
861
/*
 * Try to reserve all filter fields this event needs in the box's shared
 * filter register.  Each field has a 6-bit refcount slot in er->ref; a
 * field can be shared only when the requested filter value matches the
 * value already programmed.  Returns NULL on success, or the empty
 * constraint when the fields cannot be reserved.
 * @cbox_filter_mask translates a field bit into the register bit mask
 * (the layout differs between SNB-EP and IVB-EP).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* event doesn't use the filter register at all */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* real boxes keep fields they already allocated earlier */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* free slot, or same filter value already in use -> share it */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* loop broke early: a needed field is taken with a conflicting value */
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* roll back every reference taken in this call */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
908
909static u64 snbep_cbox_filter_mask(int fields)
910{
911 u64 mask = 0;
912
913 if (fields & 0x1)
914 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
915 if (fields & 0x2)
916 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
917 if (fields & 0x4)
918 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
919 if (fields & 0x8)
920 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
921
922 return mask;
923}
924
/* SNB-EP flavour of the shared Cbox constraint logic. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
930
931static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
932{
933 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
934 struct extra_reg *er;
935 int idx = 0;
936
937 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
938 if (er->event != (event->hw.config & er->config_mask))
939 continue;
940 idx |= er->idx;
941 }
942
943 if (idx) {
944 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
945 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
946 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
947 reg1->idx = idx;
948 }
949 return 0;
950}
951
/* Cbox ops: common MSR accessors plus the filter-sharing callbacks. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SNB-EP Cbox (LLC coherence engine): up to 8 boxes, 4 counters each,
 * one shared filter register per box (num_shared_regs = 1).
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
974
/*
 * Re-position a PCU band-filter value from its current byte slot
 * (reg1->idx) to @new_idx and return the shifted value.  When @modify
 * is true the event state is updated to the new slot as well.
 */
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	/* each band occupies one byte of the filter register */
	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		/* event select codes (0xb-0xe) track the band index 1:1 */
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
993
/*
 * Reserve a byte slot of the shared PCU filter register.  If the
 * requested slot is busy with a conflicting value, try the remaining
 * three slots (moving the band value accordingly) before giving up.
 * Returns NULL on success, the empty constraint otherwise.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* no filter needed, or a real box already holds its slot */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* slot is free, or holds the exact same band value -> share it */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* rotate through the other three byte slots */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the slot move on the real event state */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1035
1036static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1037{
1038 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1039 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1040
1041 if (uncore_box_is_fake(box) || !reg1->alloc)
1042 return;
1043
1044 atomic_sub(1 << (reg1->idx * 8), &er->ref);
1045 reg1->alloc = 0;
1046}
1047
1048static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1049{
1050 struct hw_perf_event *hwc = &event->hw;
1051 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1052 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1053
1054 if (ev_sel >= 0xb && ev_sel <= 0xe) {
1055 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1056 reg1->idx = ev_sel - 0xb;
b10fc1c3 1057 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
8268fdfc
YZ
1058 }
1059 return 0;
1060}
1061
/* PCU ops: common MSR accessors plus the band-filter slot management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNB-EP power control unit: one box, 4 counters, shared band filter. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all MSR-based SNB-EP uncore PMUs. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1089
1090void snbep_uncore_cpu_init(void)
1091{
1092 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1093 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1094 uncore_msr_uncores = snbep_msr_uncores;
1095}
1096
/* Indices into uncore_extra_pci_dev[die].dev[] for auxiliary devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};
1103
1104static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1105{
1106 struct hw_perf_event *hwc = &event->hw;
1107 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1108 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1109
1110 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1111 reg1->idx = 0;
1112 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1113 reg1->config = event->attr.config1;
1114 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1115 reg2->config = event->attr.config2;
1116 }
1117 return 0;
1118}
1119
/*
 * Enable a QPI event.  If the event uses the packet match/mask filter,
 * those registers live on a separate "filter" PCI device looked up via
 * uncore_extra_pci_dev[]; each 64-bit value is written as two dwords.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects the port: PORT0_FILTER, PORT1_FILTER, ... */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* filter device may be absent; skip filtering silently then */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1146
/* QPI ops: common PCI accessors plus match/mask filter handling. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1154
/* Register layout shared by all SNB-EP PCI-based uncore boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

/* Home agent. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Integrated memory controller: one box per channel, fixed DCLK counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link layer: uses its own event mask, ops and format group. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* Ring-to-PCIe interface. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Ring-to-QPI interface: only 3 counters per box. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1216
/* Indices for snbep_pci_uncores[], referenced from the PCI ID table. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all PCI-based SNB-EP uncore PMUs. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1233
/*
 * PCI devices hosting SNB-EP uncore PMON blocks.  driver_data encodes
 * (uncore type index, box index); UNCORE_EXTRA_PCI_DEV entries are
 * auxiliary devices (QPI port filters), not PMON boxes themselves.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1287
/* Probe-less driver; the uncore core matches devices via this id table. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1292
/* low 3 bits of the CPUNODEID config register hold the local node id */
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 *
 * Walks every Ubox PCI device matching @devid, reads the local node id
 * (at @nodeid_loc) and the node-id mapping register (at @idmap_loc) and
 * records bus -> physical-package-id in the per-segment pci2phy map.
 * Buses without a Ubox inherit the mapping of a neighbouring bus,
 * scanned downwards when @reverse is set, upwards otherwise.
 * Returns 0 on success or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* drop the reference held on the last device (NULL-safe) */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1375
/*
 * Register the SNB-EP PCI uncores after building the bus -> socket map
 * from the Ubox device (0x3ce0).  Returns 0 or a negative errno.
 */
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
1385/* end of Sandy Bridge-EP uncore support */
1386
1387/* IvyTown uncore support */
ddcd0973 1388static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
8268fdfc
YZ
1389{
1390 unsigned msr = uncore_msr_box_ctl(box);
1391 if (msr)
ddcd0973 1392 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
8268fdfc
YZ
1393}
1394
/* Reset an IVB-EP PCI-based box via its PCI config-space control reg. */
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
1401
/* IVB-EP MSR ops: SNB-EP accessors with the IVB-EP box-init sequence. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1409
/* Default MSR ops for IVB-EP boxes without special callbacks. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Default PCI ops: SNB-EP accessors with the IVB-EP box init. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1422
/* Register layout shared by all IVB-EP PCI-based uncore boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
/* Generic IVB-EP event format (8-bit threshold). */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: only a 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: adds the filter fields (tid/link/state/nid/opc/nc/c6/isoc). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy selection/edge/invert and four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1505
/* sysfs "format" groups exposing the attribute tables above. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1530
/* IVB-EP Ubox: same counters as SNB-EP, IVB-EP event mask and ops. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1545
/*
 * IVB-EP Cbox events that require the box filter.  ->idx is a bitmap of
 * filter fields as decoded by ivbep_cbox_filter_mask():
 * 0x1=tid, 0x2=link, 0x4=state, 0x8=nid, 0x10=opc(+nc/c6/isoc).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1586
ddcd0973 1587static u64 ivbep_cbox_filter_mask(int fields)
8268fdfc
YZ
1588{
1589 u64 mask = 0;
1590
1591 if (fields & 0x1)
ddcd0973 1592 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
8268fdfc 1593 if (fields & 0x2)
ddcd0973 1594 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
8268fdfc 1595 if (fields & 0x4)
ddcd0973 1596 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
8268fdfc 1597 if (fields & 0x8)
ddcd0973 1598 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
7e96ae1a 1599 if (fields & 0x10) {
ddcd0973 1600 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
7e96ae1a
AK
1601 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1602 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1603 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1604 }
8268fdfc
YZ
1605
1606 return mask;
1607}
1608
/* IVB-EP flavour of the shared Cbox constraint logic. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1614
ddcd0973 1615static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc
YZ
1616{
1617 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1618 struct extra_reg *er;
1619 int idx = 0;
1620
ddcd0973 1621 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
8268fdfc
YZ
1622 if (er->event != (event->hw.config & er->config_mask))
1623 continue;
1624 idx |= er->idx;
1625 }
1626
1627 if (idx) {
1628 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1629 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
ddcd0973 1630 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
8268fdfc
YZ
1631 reg1->idx = idx;
1632 }
1633 return 0;
1634}
1635
/*
 * Enable an IVB-EP Cbox event, programming the shared filter first.
 * The 64-bit filter value is split across two MSRs: the low half goes
 * to reg1->reg, the high half to reg1->reg + 6 (the second filter MSR
 * sits 6 MSR addresses above the first on IVB-EP).
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1649
/* IVB-EP Cbox ops: custom enable (split filter write) and filter mgmt. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVB-EP Cbox: up to 15 boxes, SNB-EP constraints still apply. */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1677
/* IVB-EP PCU ops: reuses the SNB-EP band-filter slot management. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVB-EP power control unit: one box, 4 counters, shared band filter. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1698
/* NULL-terminated list of all MSR-based IVB-EP uncore PMUs. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1705
ddcd0973 1706void ivbep_uncore_cpu_init(void)
8268fdfc 1707{
ddcd0973
PZ
1708 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1709 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1710 uncore_msr_uncores = ivbep_msr_uncores;
8268fdfc
YZ
1711}
1712
/* IVB-EP home agent: two boxes (one per memory controller). */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVB-EP memory controller: 8 channels, fixed DCLK counter per box. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1732
/* registers in IRP boxes are not properly aligned */
/* per-counter control and counter config-space offsets, indexed by hwc->idx */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
8268fdfc 1736
/* Enable an IRP event via its per-counter control register. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1745
/* Disable an IRP event by rewriting its control without the enable bit. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1753
/*
 * Read an IRP counter as two 32-bit PCI config reads: the low dword at
 * the counter offset, the high dword at offset + 4, assembled into a u64.
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1765
ddcd0973
PZ
1766static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1767 .init_box = ivbep_uncore_pci_init_box,
8268fdfc
YZ
1768 .disable_box = snbep_uncore_pci_disable_box,
1769 .enable_box = snbep_uncore_pci_enable_box,
ddcd0973
PZ
1770 .disable_event = ivbep_uncore_irp_disable_event,
1771 .enable_event = ivbep_uncore_irp_enable_event,
1772 .read_counter = ivbep_uncore_irp_read_counter,
8268fdfc
YZ
1773};
1774
/* IRP (IIO ring port): one box; uses the irregular-offset ops above. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1785
ddcd0973
PZ
1786static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1787 .init_box = ivbep_uncore_pci_init_box,
8268fdfc
YZ
1788 .disable_box = snbep_uncore_pci_disable_box,
1789 .enable_box = snbep_uncore_pci_enable_box,
1790 .disable_event = snbep_uncore_pci_disable_event,
1791 .enable_event = snbep_qpi_enable_event,
1792 .read_counter = snbep_uncore_pci_read_counter,
1793 .hw_config = snbep_qpi_hw_config,
1794 .get_constraint = uncore_get_constraint,
1795 .put_constraint = uncore_put_constraint,
1796};
1797
/* QPI link layer: three port boxes; one shared reg for match/mask filters. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1811
/* R2PCIe ring-to-PCIe interface: 44-bit counters, constrained events. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1820
/* R3QPI ring-to-QPI interface: two boxes, 3 counters each, 44-bit wide. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1829
/* Indexes into ivbep_pci_uncores[], encoded into pci_device_id driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1838
/* PCI-based IvyBridge-EP uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1848
/*
 * IvyBridge-EP uncore PCI device table.  driver_data encodes the PMU
 * type index and box number; the QPI filter devices are claimed as
 * UNCORE_EXTRA_PCI_DEV and used only for match/mask programming.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1934
ddcd0973
PZ
1935static struct pci_driver ivbep_uncore_pci_driver = {
1936 .name = "ivbep_uncore",
1937 .id_table = ivbep_uncore_pci_ids,
8268fdfc
YZ
1938};
1939
ddcd0973 1940int ivbep_uncore_pci_init(void)
8268fdfc 1941{
68ce4a0d 1942 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
8268fdfc
YZ
1943 if (ret)
1944 return ret;
ddcd0973
PZ
1945 uncore_pci_uncores = ivbep_pci_uncores;
1946 uncore_pci_driver = &ivbep_uncore_pci_driver;
8268fdfc
YZ
1947 return 0;
1948}
1949/* end of IvyTown uncore support */
e735b9db 1950
77af0037
HC
1951/* KNL uncore support */
/* sysfs "format" attributes for the KNL Ubox (5-bit threshold field). */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1966
/* KNL Ubox: single box with two general counters and a fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1981
/* sysfs "format" attributes for the KNL CHA, including its filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
2006
/* CHA events restricted to counter 0 (counter bitmask 0x1). */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
2013
/*
 * Events that need the CHA filter register; the idx value selects which
 * filter fields apply (see knl_cha_filter_mask()).
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2022
2023static u64 knl_cha_filter_mask(int fields)
2024{
2025 u64 mask = 0;
2026
2027 if (fields & 0x1)
2028 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2029 if (fields & 0x2)
2030 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2031 if (fields & 0x4)
2032 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2033 return mask;
2034}
2035
/* Constraint lookup using the KNL-specific filter-field mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2041
/*
 * Collect the filter-field indexes of every extra-reg entry matching the
 * event and, if any apply, program the per-box FILTER0 register.  The
 * remote/local-node and NNC match bits are always ORed in when a filter
 * is in use.  Always returns 0.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Each CHA has its own filter register, spaced by the MSR offset. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2067
/* Forward declaration: KNL reuses the Haswell-EP C-Box enable path. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

/* CHA ops: SNB-EP MSR access with KNL-specific filter/constraint hooks. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2082
/* KNL Caching/Home Agent: 38 boxes sharing the HSW-EP C-Box register base. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2098
/* sysfs "format" attributes for the KNL PCU (occupancy-capable counters). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2116
/* KNL Power Control Unit: one box, HSW-EP PCU register layout. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2129
/* MSR-based KNL uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2136
/* Register the Knights Landing MSR-based uncore PMUs. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2141
/*
 * Enable a KNL IMC box by clearing its box control register (writing 0
 * drops the freeze bits, unlike the generic enable path).
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2149
2150static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2151 struct perf_event *event)
2152{
2153 struct pci_dev *pdev = box->pci_dev;
2154 struct hw_perf_event *hwc = &event->hw;
2155
2156 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2157 == UNCORE_FIXED_EVENT)
2158 pci_write_config_dword(pdev, hwc->config_base,
2159 hwc->config | KNL_PMON_FIXED_CTL_EN);
2160 else
2161 pci_write_config_dword(pdev, hwc->config_base,
2162 hwc->config | SNBEP_PMON_CTL_EN);
2163}
2164
/* Shared ops for KNL IMC/EDC boxes (PCI config-space PMON access). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2173
/* KNL memory-controller UCLK domain: one box per MC (2 total). */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2189
/* KNL memory-controller DCLK channels: 3 channels x 2 MCs = 6 boxes. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2205
/* KNL embedded DRAM controller UCLK domain: one box per EDC (8 total). */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2221
/* KNL embedded DRAM controller ECLK domain: one box per EDC (8 total). */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2237
/* M2PCIe event 0x23 may only use counters 0-1 (bitmask 0x3). */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2242
/* KNL M2PCIe: one box, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2251
/* sysfs "format" attributes for the KNL IRP box. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2266
/* KNL IRP: one box with two counters, regular SNB-EP PCI counter offsets. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2279
/* Indexes into knl_pci_uncores[], encoded into pci_device_id driver_data. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2288
/* PCI-based KNL uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2298
/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */

/*
 * Because the device IDs are shared, instances are disambiguated by PCI
 * devfn: UNCORE_PCI_DEV_FULL_DATA(dev, fn, type, box#).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2424
/* PCI driver stub: the uncore core fills in probe/remove at registration. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2429
2430int knl_uncore_pci_init(void)
2431{
2432 int ret;
2433
2434 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2435 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2436 if (ret)
2437 return ret;
2438 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2439 if (ret)
2440 return ret;
2441 uncore_pci_uncores = knl_pci_uncores;
2442 uncore_pci_driver = &knl_uncore_pci_driver;
2443 return 0;
2444}
2445
2446/* end of KNL uncore support */
2447
e735b9db
YZ
2448/* Haswell-EP uncore support */
/* sysfs "format" attributes for the Haswell-EP Ubox (TID/CID filters). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2464
2465static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2466{
2467 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2468 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2469 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2470 reg1->idx = 0;
2471 return 0;
2472}
2473
/* Ubox ops: common SNB-EP MSR access plus the filter hw_config above. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2480
/* HSW-EP Ubox: one box, 44-bit general counters, 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2496
/* sysfs "format" attributes for the HSW-EP C-Box, including filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2518
/* C-Box events restricted to specific counters (bitmask = allowed ctrs). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2529
/*
 * Events that need the C-Box filter register; the idx bitmask selects
 * which filter fields apply (see hswep_cbox_filter_mask()).
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2571
2572static u64 hswep_cbox_filter_mask(int fields)
2573{
2574 u64 mask = 0;
2575 if (fields & 0x1)
2576 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2577 if (fields & 0x2)
2578 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2579 if (fields & 0x4)
2580 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2581 if (fields & 0x8)
2582 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2583 if (fields & 0x10) {
2584 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2585 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2586 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2587 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2588 }
2589 return mask;
2590}
2591
/* Constraint lookup using the HSW-EP-specific filter-field mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2597
/*
 * Collect the filter-field indexes of every extra-reg entry matching the
 * event and, if any apply, point the extra reg at this box's FILTER0
 * register with the masked config1 value.  Always returns 0.
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Each C-Box has its own filter register, spaced by the MSR offset. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
2618
/*
 * Enable a C-Box event.  If the event uses a filter, write the shared
 * 64-bit filter value as two 32-bit MSR writes (low half to reg, high
 * half to reg + 1) before setting the enable bit in the control MSR.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2633
/* C-Box ops: SNB-EP MSR access with HSW-EP filter/constraint handling. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2645
/* HSW-EP C-Box: up to 18 boxes (clamped elsewhere to the core count). */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2661
68055915
AK
2662/*
2663 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2664 */
2665static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2666{
2667 unsigned msr = uncore_msr_box_ctl(box);
2668
2669 if (msr) {
2670 u64 init = SNBEP_PMON_BOX_CTL_INT;
2671 u64 flags = 0;
2672 int i;
2673
2674 for_each_set_bit(i, (unsigned long *)&init, 64) {
2675 flags |= (1ULL << i);
2676 wrmsrl(msr, flags);
2677 }
2678 }
2679}
2680
/* SBOX ops: common SNB-EP MSR ops with the #GP-safe init override. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2685
e735b9db
YZ
/* sysfs event-format attributes exposed by the HSW-EP SBOX PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2695
45bd07ad 2696static const struct attribute_group hswep_uncore_sbox_format_group = {
e735b9db
YZ
2697 .name = "format",
2698 .attrs = hswep_uncore_sbox_formats_attr,
2699};
2700
/*
 * HSW-EP SBOX (ring-to-QPI interface) PMU; hswep_uncore_cpu_init() may
 * reduce num_boxes to 2 on parts with fewer SBOXes.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2714
2715static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2716{
2717 struct hw_perf_event *hwc = &event->hw;
2718 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2719 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2720
2721 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2722 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2723 reg1->idx = ev_sel - 0xb;
2724 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2725 }
2726 return 0;
2727}
2728
/* HSW-EP PCU PMU callbacks: common SNB-EP MSR ops + band-filter config. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2735
/* HSW-EP PCU (power control unit) uncore PMU description. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2749
/* All MSR-based HSW-EP uncore PMUs, NULL-terminated. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2757
/*
 * Register the MSR-based HSW-EP uncore PMUs, trimming box counts to
 * what the running part actually implements.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	/* One C-box per core; cap the static maximum at the core count. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/*
		 * NOTE(review): bits 7:6 of PCU capability register 0x94
		 * appear to encode the SBOX population; 0 means only two
		 * SBOXes — confirm against the CAPID4 definition.
		 */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2777
/* HSW-EP Home Agent uncore PMU (PCI-based, two agents per socket). */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2785
/*
 * Predefined IMC events; CAS count scale 6.103515625e-5 converts
 * 64-byte cache-line transfers to MiB (64 / 2^20).
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2796
/* HSW-EP IMC (memory controller channel) PMU with a fixed DCLK counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2808
41a134a5
AK
/* PCI config offsets of the four IRP counters. */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

/*
 * Read one 64-bit IRP counter as two 32-bit PCI config reads
 * (low dword first, then high dword into the upper half of count).
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2822
e735b9db
YZ
/* IRP PMU callbacks: IVB-EP event control with the HSW-EP counter reader. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2831
/* HSW-EP IRP (IIO coherency ring port) PMU description. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2842
/* HSW-EP QPI link-layer PMU (3 ports), reuses SNB-EP QPI ops/format. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2856
/* Per-event counter restrictions for the HSW-EP R2PCIe box. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2878
/* HSW-EP R2PCIe (ring-to-PCIe) PMU description. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2887
/* Per-event counter restrictions for the HSW-EP R3QPI box. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2924
/* HSW-EP R3QPI (ring-to-QPI) PMU description. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2933
/* Indices into hswep_pci_uncores[] / UNCORE_PCI_DEV_DATA driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2942
/* All PCI-based HSW-EP uncore PMUs, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	  = &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	  = &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	  = &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	  = &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]  = &hswep_uncore_r3qpi,
	NULL,
};
2952
070a7cdf 2953static const struct pci_device_id hswep_uncore_pci_ids[] = {
e735b9db
YZ
2954 { /* Home Agent 0 */
2955 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
2956 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
2957 },
2958 { /* Home Agent 1 */
2959 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
2960 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
2961 },
2962 { /* MC0 Channel 0 */
2963 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
2964 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
2965 },
2966 { /* MC0 Channel 1 */
2967 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
2968 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
2969 },
2970 { /* MC0 Channel 2 */
2971 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
2972 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
2973 },
2974 { /* MC0 Channel 3 */
2975 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
2976 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
2977 },
2978 { /* MC1 Channel 0 */
2979 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
2980 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
2981 },
2982 { /* MC1 Channel 1 */
2983 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
2984 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
2985 },
2986 { /* MC1 Channel 2 */
2987 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
2988 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
2989 },
2990 { /* MC1 Channel 3 */
2991 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
2992 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
2993 },
2994 { /* IRP */
2995 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
2996 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
2997 },
2998 { /* QPI0 Port 0 */
2999 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3000 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3001 },
3002 { /* QPI0 Port 1 */
3003 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3004 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3005 },
3006 { /* QPI1 Port 2 */
3007 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3008 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3009 },
3010 { /* R2PCIe */
3011 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3012 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3013 },
3014 { /* R3QPI0 Link 0 */
3015 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3016 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3017 },
3018 { /* R3QPI0 Link 1 */
3019 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3020 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3021 },
3022 { /* R3QPI1 Link 2 */
3023 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3024 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3025 },
3026 { /* QPI Port 0 filter */
3027 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3028 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3029 SNBEP_PCI_QPI_PORT0_FILTER),
3030 },
3031 { /* QPI Port 1 filter */
3032 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3033 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3034 SNBEP_PCI_QPI_PORT1_FILTER),
3035 },
5306c31c
AK
3036 { /* PCU.3 (for Capability registers) */
3037 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
3038 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3039 HSWEP_PCI_PCU_3),
3040 },
e735b9db
YZ
3041 { /* end: all zeroes */ }
3042};
3043
/* Skeleton PCI driver; the uncore core fills in probe/remove. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3048
3049int hswep_uncore_pci_init(void)
3050{
68ce4a0d 3051 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
e735b9db
YZ
3052 if (ret)
3053 return ret;
3054 uncore_pci_uncores = hswep_pci_uncores;
3055 uncore_pci_driver = &hswep_uncore_pci_driver;
3056 return 0;
3057}
3058/* end of Haswell-EP uncore support */
070e9887 3059
d6980ef3 3060/* BDX uncore support */
070e9887
KL
3061
/* BDX UBOX PMU: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3077
/* Per-event counter restrictions for the BDX C-box. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3085
/* BDX C-box PMU; num_boxes is trimmed to the core count at init. */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3101
d7717587
SE
/* BDX SBOX PMU; removed at init on BDX-DE and SBOX-less parts. */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3115
/* Index of the SBOX entry in bdx_msr_uncores[], so init can NULL it out. */
#define BDX_MSR_UNCORE_SBOX	3

/* All MSR-based BDX uncore PMUs; SBOX must stay last before NULL. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3125
bb9fbe1b
KL
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3131
070e9887
KL
/*
 * Register the MSR-based BDX uncore PMUs, dropping the SBOX on parts
 * that do not implement it and applying the PCU counter-0 quirk.
 */
void bdx_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	/* One C-box per core; cap the static maximum at the core count. */
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* BDX-DE doesn't have SBOX */
	if (boot_cpu_data.x86_model == 86) {
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	/* Detect systems with no SBOXes */
	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		struct pci_dev *pdev;
		u32 capid4;

		/*
		 * NOTE(review): bits 7:6 of PCU capability register 0x94
		 * appear to encode the SBOX population (0 => none) —
		 * confirm against the CAPID4 definition.
		 */
		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
		pci_read_config_dword(pdev, 0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	}
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3155
/* BDX Home Agent uncore PMU (PCI-based, two agents per socket). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3163
/* BDX IMC PMU; reuses the HSW-EP event descriptions. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3175
/* BDX IRP PMU; reuses the HSW-EP IRP ops (split 32-bit counter reads). */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3186
d6980ef3
KL
/* BDX QPI link-layer PMU (3 ports), reuses SNB-EP QPI ops/format. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
070e9887
KL
3200
/* Per-event counter restrictions for the BDX R2PCIe box. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3213
/* BDX R2PCIe (ring-to-PCIe) PMU description. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3222
d6980ef3
KL
/* Per-event counter restrictions for the BDX R3QPI box. */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3256
/* BDX R3QPI (ring-to-QPI) PMU description. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3265
070e9887
KL
/* Indices into bdx_pci_uncores[] / UNCORE_PCI_DEV_DATA driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3274
/* All PCI-based BDX uncore PMUs, indexed by the enum above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3284
c2365b93 3285static const struct pci_device_id bdx_uncore_pci_ids[] = {
070e9887
KL
3286 { /* Home Agent 0 */
3287 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3288 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3289 },
d6980ef3
KL
3290 { /* Home Agent 1 */
3291 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3292 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3293 },
070e9887
KL
3294 { /* MC0 Channel 0 */
3295 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3296 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3297 },
3298 { /* MC0 Channel 1 */
3299 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3300 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3301 },
d6980ef3
KL
3302 { /* MC0 Channel 2 */
3303 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3304 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3305 },
3306 { /* MC0 Channel 3 */
3307 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3308 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3309 },
3310 { /* MC1 Channel 0 */
3311 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3312 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3313 },
3314 { /* MC1 Channel 1 */
3315 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3316 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3317 },
3318 { /* MC1 Channel 2 */
3319 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3320 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3321 },
3322 { /* MC1 Channel 3 */
3323 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3324 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3325 },
070e9887
KL
3326 { /* IRP */
3327 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3328 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3329 },
d6980ef3
KL
3330 { /* QPI0 Port 0 */
3331 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3332 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3333 },
3334 { /* QPI0 Port 1 */
3335 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3336 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3337 },
3338 { /* QPI1 Port 2 */
3339 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3340 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3341 },
070e9887
KL
3342 { /* R2PCIe */
3343 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3344 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3345 },
d6980ef3
KL
3346 { /* R3QPI0 Link 0 */
3347 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3348 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3349 },
3350 { /* R3QPI0 Link 1 */
3351 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3352 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3353 },
3354 { /* R3QPI1 Link 2 */
3355 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3356 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3357 },
3358 { /* QPI Port 0 filter */
3359 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
156c8b58
KL
3360 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3361 SNBEP_PCI_QPI_PORT0_FILTER),
d6980ef3
KL
3362 },
3363 { /* QPI Port 1 filter */
3364 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
156c8b58
KL
3365 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3366 SNBEP_PCI_QPI_PORT1_FILTER),
d6980ef3
KL
3367 },
3368 { /* QPI Port 2 filter */
3369 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
156c8b58
KL
3370 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3371 BDX_PCI_QPI_PORT2_FILTER),
d6980ef3 3372 },
15a3e845
OS
3373 { /* PCU.3 (for Capability registers) */
3374 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
3375 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3376 HSWEP_PCI_PCU_3),
3377 },
070e9887
KL
3378 { /* end: all zeroes */ }
3379};
3380
/* Skeleton PCI driver; the uncore core fills in probe/remove. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3385
/*
 * Build the PCI-bus-to-socket map (via UBOX device 0x6f1e) and register
 * the BDX PCI uncore tables. Returns 0 or a negative errno.
 */
int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}
3396
d6980ef3 3397/* end of BDX uncore support */
cd34cd97
KL
3398
3399/* SKX uncore support */
3400
/* SKX UBOX PMU: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3415
/* sysfs event-format attributes exposed by the SKX CHA PMU. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3436
45bd07ad 3437static const struct attribute_group skx_uncore_chabox_format_group = {
cd34cd97
KL
3438 .name = "format",
3439 .attrs = skx_uncore_cha_formats_attr,
3440};
3441
/* Per-event counter restrictions for the SKX CHA box. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3447
/* Events that require CHA filter fields (idx selects which fields). */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3460
3461static u64 skx_cha_filter_mask(int fields)
3462{
3463 u64 mask = 0;
3464
3465 if (fields & 0x1)
3466 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3467 if (fields & 0x2)
3468 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3469 if (fields & 0x4)
3470 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
8aa7b7b4
SE
3471 if (fields & 0x8) {
3472 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3473 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3474 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3475 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3476 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3477 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3478 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3479 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3480 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3481 }
cd34cd97
KL
3482 return mask;
3483}
3484
/* Resolve CHA counter constraints using the SKX filter-field mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3490
/*
 * Figure out which CHA filter fields the event needs and, if any,
 * route attr.config1 into the per-box filter register. Always returns 0.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Accumulate the filter fields of every matching extra-reg entry. */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* CHA filter MSRs reuse the HSW-EP C-box layout and stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3511
/* SKX CHA PMU callbacks. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3524
/*
 * SKX CHA (caching/home agent) PMU; num_boxes is left 0 here and is
 * filled in at runtime from the CPU's CHA population.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3539
/* sysfs event-format attributes exposed by the SKX IIO PMU. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3550
45bd07ad 3551static const struct attribute_group skx_uncore_iio_format_group = {
cd34cd97
KL
3552 .name = "format",
3553 .attrs = skx_uncore_iio_formats_attr,
3554};
3555
/* Per-event counter restrictions for the SKX IIO box. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3565
/* Enable an IIO event by setting the enable bit in its control MSR. */
static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
3573
/* MSR-based PMON ops for the SKX IIO boxes. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

/* SKX IIO ("iio") uncore PMU type: one box per IIO stack. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3598
0f519f03
KL
3599enum perf_uncore_iio_freerunning_type_id {
3600 SKX_IIO_MSR_IOCLK = 0,
3601 SKX_IIO_MSR_BW = 1,
3602 SKX_IIO_MSR_UTIL = 2,
3603
3604 SKX_IIO_FREERUNNING_TYPE_MAX,
3605};
3606
3607
3608static struct freerunning_counters skx_iio_freerunning[] = {
3609 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3610 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3611 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
3612};
3613
/*
 * Pre-defined event aliases for the SKX IIO free-running counters.
 * event=0xff marks a free-running counter; umask encodes type/index.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3653
/* Free-running counters can only be read; no enable/disable ops. */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

/* Only event/umask are meaningful for free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};

/* SKX "iio_free_running" PMU: 1 ioclk + 8 bw + 8 util counters per box. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3680
cd34cd97
KL
3681static struct attribute *skx_uncore_formats_attr[] = {
3682 &format_attr_event.attr,
3683 &format_attr_umask.attr,
3684 &format_attr_edge.attr,
3685 &format_attr_inv.attr,
3686 &format_attr_thresh8.attr,
3687 NULL,
3688};
3689
45bd07ad 3690static const struct attribute_group skx_uncore_format_group = {
cd34cd97
KL
3691 .name = "format",
3692 .attrs = skx_uncore_formats_attr,
3693};
3694
3695static struct intel_uncore_type skx_uncore_irp = {
3696 .name = "irp",
3697 .num_counters = 2,
29b46dfb 3698 .num_boxes = 6,
cd34cd97
KL
3699 .perf_ctr_bits = 48,
3700 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3701 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
3702 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3703 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
3704 .msr_offset = SKX_IRP_MSR_OFFSET,
3705 .ops = &skx_uncore_iio_ops,
3706 .format_group = &skx_uncore_format_group,
3707};
3708
bab4e569
KL
3709static struct attribute *skx_uncore_pcu_formats_attr[] = {
3710 &format_attr_event.attr,
3711 &format_attr_umask.attr,
3712 &format_attr_edge.attr,
3713 &format_attr_inv.attr,
3714 &format_attr_thresh8.attr,
3715 &format_attr_occ_invert.attr,
3716 &format_attr_occ_edge_det.attr,
3717 &format_attr_filter_band0.attr,
3718 &format_attr_filter_band1.attr,
3719 &format_attr_filter_band2.attr,
3720 &format_attr_filter_band3.attr,
3721 NULL,
3722};
3723
3724static struct attribute_group skx_uncore_pcu_format_group = {
3725 .name = "format",
3726 .attrs = skx_uncore_pcu_formats_attr,
3727};
3728
cd34cd97
KL
3729static struct intel_uncore_ops skx_uncore_pcu_ops = {
3730 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3731 .hw_config = hswep_pcu_hw_config,
3732 .get_constraint = snbep_pcu_get_constraint,
3733 .put_constraint = snbep_pcu_put_constraint,
3734};
3735
3736static struct intel_uncore_type skx_uncore_pcu = {
3737 .name = "pcu",
3738 .num_counters = 4,
3739 .num_boxes = 1,
3740 .perf_ctr_bits = 48,
3741 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
3742 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
3743 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
3744 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
3745 .num_shared_regs = 1,
3746 .ops = &skx_uncore_pcu_ops,
bab4e569 3747 .format_group = &skx_uncore_pcu_format_group,
cd34cd97
KL
3748};
3749
/* All MSR-accessed SKX uncore PMU types, registered by skx_uncore_cpu_init(). */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3759
320b0651
KL
3760/*
3761 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
3762 * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
3763 */
3764#define SKX_CAPID6 0x9c
3765#define SKX_CHA_BIT_MASK GENMASK(27, 0)
3766
cd34cd97
KL
3767static int skx_count_chabox(void)
3768{
320b0651
KL
3769 struct pci_dev *dev = NULL;
3770 u32 val = 0;
3771
3772 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3773 if (!dev)
3774 goto out;
3775
3776 pci_read_config_dword(dev, SKX_CAPID6, &val);
3777 val &= SKX_CHA_BIT_MASK;
3778out:
3779 pci_dev_put(dev);
3780 return hweight32(val);
cd34cd97
KL
3781}
3782
/* Register the SKX MSR uncores; CHA box count comes from the CAPID6 fuse. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3788
/* SKX memory controller ("imc") PMU: PCI-accessed, one box per channel. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3805
/* UPI "format" attributes: UPI uses an extended umask field. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3819
/*
 * Reset the UPI PMON box. The CTL_OFFS8 flag marks that control registers
 * are 8 bytes apart in config space for this box type.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3827
/* PCI PMON ops for the SKX UPI boxes; only init_box is UPI-specific. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX UPI link layer ("upi") uncore PMU type, one box per link. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3850
/* Reset the M2M PMON box; control registers are at 8-byte stride. */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

/* PCI PMON ops for the SKX M2M boxes. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX mesh-to-memory ("m2m") uncore PMU type. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3880
/* Event 0x23 is limited to counters 0-1 on M2PCIe. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX mesh-to-PCIe ("m2pcie") uncore PMU type. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3899
/* M3UPI counter-placement restrictions per event code. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX mesh-to-UPI ("m3upi") uncore PMU type, one box per UPI link. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3925
/* Indices into skx_pci_uncores[], referenced from skx_uncore_pci_ids[]. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3942
/*
 * PCI device IDs of all SKX PCI-accessed PMON devices. driver_data encodes
 * (device, function, uncore type index, box index) via UNCORE_PCI_DEV_FULL_DATA.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4018
4019
/* Probe-less driver shell; the uncore core matches IDs itself. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4024
4025int skx_uncore_pci_init(void)
4026{
4027 /* need to double check pci address */
4028 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4029
4030 if (ret)
4031 return ret;
4032
4033 uncore_pci_uncores = skx_pci_uncores;
4034 uncore_pci_driver = &skx_uncore_pci_driver;
4035 return 0;
4036}
4037
4038/* end of SKX uncore support */
210cc5f9
KL
4039
4040/* SNR uncore support */
4041
/* SNR Ubox ("ubox") PMU: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4056
/* SNR CHA "format" attributes, including the TID filter field. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4071
4072static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4073{
4074 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4075
4076 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4077 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4078 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4079 reg1->idx = 0;
4080
4081 return 0;
4082}
4083
4084static void snr_cha_enable_event(struct intel_uncore_box *box,
4085 struct perf_event *event)
4086{
4087 struct hw_perf_event *hwc = &event->hw;
4088 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4089
4090 if (reg1->idx != EXTRA_REG_NONE)
4091 wrmsrl(reg1->reg, reg1->config);
4092
4093 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4094}
4095
/* MSR-based PMON ops for the SNR CHA boxes. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};

/* SNR CHA ("cha") uncore PMU type. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4120
/* SNR IIO "format" attributes (wider ch_mask/fc_mask variants than SKX). */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};

/* SNR IIO ("iio") uncore PMU type. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4151
/* SNR IRP ("irp") uncore PMU type. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR mesh-to-PCIe ("m2pcie") uncore PMU type. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name			= "m2pcie",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr		= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl		= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_M2PCIE_MSR_OFFSET,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4179
4180static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4181{
4182 struct hw_perf_event *hwc = &event->hw;
4183 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4184 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4185
4186 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4187 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4188 reg1->idx = ev_sel - 0xb;
4189 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4190 }
4191 return 0;
4192}
4193
/* PCU ops: common IVB-EP MSR ops plus SNR config and SNB-EP filter hooks. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNR power control unit ("pcu") uncore PMU type; reuses the SKX formats. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4214
/* Types of SNR IIO free-running counters. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { first counter MSR, box offset, counter offset, #counters, width } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4226
/* Pre-defined event aliases for the SNR IIO free-running counters. */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4257
/* SNR "iio_free_running" PMU: 1 ioclk + 8 bw_in counters per box. */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All MSR-accessed SNR uncore PMU types. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

/* Register the SNR MSR uncores. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4284
/* Reset the SNR M2M PMON box; control registers are at 8-byte stride. */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

/* PCI PMON ops for the SNR M2M boxes. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR M2M "format" attributes (wider extended umask). */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4316
/* SNR mesh-to-memory ("m2m") uncore PMU type. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

/* SNR PCIe gen3 root port ("pcie3") uncore PMU type. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4343
/* Indices into snr_pci_uncores[], referenced from snr_uncore_pci_ids[]. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};

/* PCI IDs of the SNR PCI-accessed PMON devices (dev, fn, type, box idx). */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* PCIe3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};
4366
/* Probe-less driver shell; the uncore core matches IDs itself. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4371
4372int snr_uncore_pci_init(void)
4373{
4374 /* SNR UBOX DID */
4375 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4376 SKX_GIDNIDMAP, true);
4377
4378 if (ret)
4379 return ret;
4380
4381 uncore_pci_uncores = snr_pci_uncores;
4382 uncore_pci_driver = &snr_uncore_pci_driver;
4383 return 0;
4384}
4385
ee49532b
KL
4386static struct pci_dev *snr_uncore_get_mc_dev(int id)
4387{
4388 struct pci_dev *mc_dev = NULL;
4389 int phys_id, pkg;
4390
4391 while (1) {
4392 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4393 if (!mc_dev)
4394 break;
4395 phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4396 if (phys_id < 0)
4397 continue;
4398 pkg = topology_phys_to_logical_pkg(phys_id);
4399 if (pkg < 0)
4400 continue;
4401 else if (pkg == id)
4402 break;
4403 }
4404 return mc_dev;
4405}
4406
4407static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4408{
4409 struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4410 unsigned int box_ctl = uncore_mmio_box_ctl(box);
4411 resource_size_t addr;
4412 u32 pci_dword;
4413
4414 if (!pdev)
4415 return;
4416
4417 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4418 addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4419
4420 pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
4421 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4422
4423 addr += box_ctl;
4424
4425 box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
4426 if (!box->io_addr)
4427 return;
4428
4429 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4430}
4431
4432static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4433{
4434 u32 config;
4435
4436 if (!box->io_addr)
4437 return;
4438
4439 config = readl(box->io_addr);
4440 config |= SNBEP_PMON_BOX_CTL_FRZ;
4441 writel(config, box->io_addr);
4442}
4443
4444static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4445{
4446 u32 config;
4447
4448 if (!box->io_addr)
4449 return;
4450
4451 config = readl(box->io_addr);
4452 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4453 writel(config, box->io_addr);
4454}
4455
4456static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4457 struct perf_event *event)
4458{
4459 struct hw_perf_event *hwc = &event->hw;
4460
4461 if (!box->io_addr)
4462 return;
4463
4464 writel(hwc->config | SNBEP_PMON_CTL_EN,
4465 box->io_addr + hwc->config_base);
4466}
4467
4468static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4469 struct perf_event *event)
4470{
4471 struct hw_perf_event *hwc = &event->hw;
4472
4473 if (!box->io_addr)
4474 return;
4475
4476 writel(hwc->config, box->io_addr + hwc->config_base);
4477}
4478
/* MMIO-based PMON ops for the SNR IMC boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* Pre-defined SNR IMC events with DRAM CAS read/write bandwidth scaling. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4499
/* SNR memory controller ("imc") PMU: MMIO-accessed, one box per channel. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4517
/*
 * Indices into snr_imc_freerunning[]: the two groups of free-running
 * IMC counters (DRAM clock ticks, and DDR read/write bandwidth).
 */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
4524
/*
 * Free-running counter layout, one entry per type above.
 * NOTE(review): field order assumed to be { counter_base,
 * counter_offset, box_offset, num_counters, bits } per
 * struct freerunning_counters in uncore.h — confirm against header.
 * DCLK: one 48-bit counter at 0x22b0; DDR: two 48-bit counters
 * (read/write) starting at 0x2290, 0x8 apart.
 */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4529
4530static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4531 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4532
4533 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4534 INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
4535 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4536 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4537 INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
4538 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4539};
4540
/*
 * Callbacks for the free-running IMC counters: no enable/disable —
 * the counters run unconditionally — only box mapping, reads, and
 * event validation via the generic free-running hw_config helper.
 */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4547
/*
 * Pseudo uncore type exposing the free-running IMC counters as a
 * separate PMU ("imc_free_running"): one logical box, three counters
 * (dclk + ddr read/write), described by snr_imc_freerunning[].
 */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4558
/* NULL-terminated list of all SNR MMIO uncore types. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4564
/*
 * Publish the SNR MMIO uncore types to the core driver by installing
 * them into the global uncore_mmio_uncores list (presumably invoked
 * from the per-CPU-model init table in uncore.c — confirm caller).
 */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4569
/* end of SNR uncore support */