]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /* |
2 | * Copyright (c) 2016 QLogic Corporation. | |
3 | * All rights reserved. | |
4 | * www.qlogic.com | |
5 | * | |
6 | * See LICENSE.qede_pmd for copyright and licensing details. | |
7 | */ | |
8 | ||
9 | #ifndef __ECORE_HSI_COMMON__ | |
10 | #define __ECORE_HSI_COMMON__ | |
11 | /********************************/ | |
12 | /* Add include to common target */ | |
13 | /********************************/ | |
14 | #include "common_hsi.h" | |
15 | ||
16 | ||
17 | /* | |
18 | * opcodes for the event ring | |
19 | */ | |
20 | enum common_event_opcode { | |
21 | COMMON_EVENT_PF_START, | |
22 | COMMON_EVENT_PF_STOP, | |
23 | COMMON_EVENT_VF_START, | |
24 | COMMON_EVENT_VF_STOP, | |
25 | COMMON_EVENT_VF_PF_CHANNEL, | |
26 | COMMON_EVENT_VF_FLR, | |
27 | COMMON_EVENT_PF_UPDATE, | |
28 | COMMON_EVENT_MALICIOUS_VF, | |
29 | COMMON_EVENT_RL_UPDATE, | |
30 | COMMON_EVENT_EMPTY, | |
31 | MAX_COMMON_EVENT_OPCODE | |
32 | }; | |
33 | ||
34 | ||
35 | /* | |
36 | * Common Ramrod Command IDs | |
37 | */ | |
38 | enum common_ramrod_cmd_id { | |
39 | COMMON_RAMROD_UNUSED, | |
40 | COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, | |
41 | COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, | |
42 | COMMON_RAMROD_VF_START /* VF Function Start */, | |
43 | COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */, | |
44 | COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */, | |
45 | COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */, | |
46 | COMMON_RAMROD_EMPTY /* Empty Ramrod */, | |
47 | MAX_COMMON_RAMROD_CMD_ID | |
48 | }; | |
49 | ||
50 | ||
51 | /* | |
52 | * The core storm context for the Ystorm | |
53 | */ | |
54 | struct ystorm_core_conn_st_ctx { | |
55 | __le32 reserved[4]; | |
56 | }; | |
57 | ||
58 | /* | |
59 | * The core storm context for the Pstorm | |
60 | */ | |
61 | struct pstorm_core_conn_st_ctx { | |
62 | __le32 reserved[4]; | |
63 | }; | |
64 | ||
65 | /* | |
66 | * Core Slowpath Connection storm context of Xstorm | |
67 | */ | |
68 | struct xstorm_core_conn_st_ctx { | |
69 | __le32 spq_base_lo /* SPQ Ring Base Address low dword */; | |
70 | __le32 spq_base_hi /* SPQ Ring Base Address high dword */; | |
71 | /* Consolidation Ring Base Address */ | |
72 | struct regpair consolid_base_addr; | |
73 | __le16 spq_cons /* SPQ Ring Consumer */; | |
74 | __le16 consolid_cons /* Consolidation Ring Consumer */; | |
75 | __le32 reserved0[55] /* Pad to 15 cycles */; | |
76 | }; | |
77 | ||
78 | struct xstorm_core_conn_ag_ctx { | |
79 | u8 reserved0 /* cdu_validation */; | |
80 | u8 core_state /* state */; | |
81 | u8 flags0; | |
82 | /* exist_in_qm0 */ | |
83 | #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 | |
84 | #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 | |
85 | /* exist_in_qm1 */ | |
86 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 | |
87 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 | |
88 | /* exist_in_qm2 */ | |
89 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 | |
90 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 | |
91 | /* exist_in_qm3 */ | |
92 | #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 | |
93 | #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 | |
94 | /* bit4 */ | |
95 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 | |
96 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 | |
97 | /* cf_array_active */ | |
98 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 | |
99 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 | |
100 | /* bit6 */ | |
101 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 | |
102 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 | |
103 | /* bit7 */ | |
104 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 | |
105 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 | |
106 | u8 flags1; | |
107 | /* bit8 */ | |
108 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 | |
109 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 | |
110 | /* bit9 */ | |
111 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 | |
112 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 | |
113 | /* bit10 */ | |
114 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 | |
115 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 | |
116 | /* bit11 */ | |
117 | #define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 | |
118 | #define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 | |
119 | /* bit12 */ | |
120 | #define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 | |
121 | #define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 | |
122 | /* bit13 */ | |
123 | #define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 | |
124 | #define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 | |
125 | /* bit14 */ | |
126 | #define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 | |
127 | #define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 | |
128 | /* bit15 */ | |
129 | #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 | |
130 | #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 | |
131 | u8 flags2; | |
132 | /* timer0cf */ | |
133 | #define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 | |
134 | #define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 | |
135 | /* timer1cf */ | |
136 | #define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 | |
137 | #define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 | |
138 | /* timer2cf */ | |
139 | #define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 | |
140 | #define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 | |
141 | /* timer_stop_all */ | |
142 | #define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 | |
143 | #define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 | |
144 | u8 flags3; | |
145 | #define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ | |
146 | #define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 | |
147 | #define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ | |
148 | #define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 | |
149 | #define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ | |
150 | #define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 | |
151 | #define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ | |
152 | #define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 | |
153 | u8 flags4; | |
154 | #define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ | |
155 | #define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 | |
156 | #define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ | |
157 | #define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 | |
158 | /* cf10 */ | |
159 | #define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 | |
160 | #define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 | |
161 | /* cf11 */ | |
162 | #define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 | |
163 | #define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 | |
164 | u8 flags5; | |
165 | /* cf12 */ | |
166 | #define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 | |
167 | #define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 | |
168 | /* cf13 */ | |
169 | #define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 | |
170 | #define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 | |
171 | /* cf14 */ | |
172 | #define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 | |
173 | #define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 | |
174 | /* cf15 */ | |
175 | #define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 | |
176 | #define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 | |
177 | u8 flags6; | |
178 | /* cf16 */ | |
179 | #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 | |
180 | #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 | |
181 | /* cf_array_cf */ | |
182 | #define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 | |
183 | #define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 | |
184 | /* cf18 */ | |
185 | #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 | |
186 | #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 | |
187 | /* cf19 */ | |
188 | #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 | |
189 | #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 | |
190 | u8 flags7; | |
191 | /* cf20 */ | |
192 | #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 | |
193 | #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 | |
194 | /* cf21 */ | |
195 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 | |
196 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 | |
197 | /* cf22 */ | |
198 | #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 | |
199 | #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 | |
200 | /* cf0en */ | |
201 | #define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 | |
202 | #define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 | |
203 | /* cf1en */ | |
204 | #define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 | |
205 | #define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 | |
206 | u8 flags8; | |
207 | /* cf2en */ | |
208 | #define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 | |
209 | #define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 | |
210 | /* cf3en */ | |
211 | #define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 | |
212 | #define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 | |
213 | /* cf4en */ | |
214 | #define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 | |
215 | #define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 | |
216 | /* cf5en */ | |
217 | #define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 | |
218 | #define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 | |
219 | /* cf6en */ | |
220 | #define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 | |
221 | #define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 | |
222 | /* cf7en */ | |
223 | #define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 | |
224 | #define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 | |
225 | /* cf8en */ | |
226 | #define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 | |
227 | #define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 | |
228 | /* cf9en */ | |
229 | #define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 | |
230 | #define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 | |
231 | u8 flags9; | |
232 | /* cf10en */ | |
233 | #define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 | |
234 | #define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 | |
235 | /* cf11en */ | |
236 | #define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 | |
237 | #define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 | |
238 | /* cf12en */ | |
239 | #define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 | |
240 | #define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 | |
241 | /* cf13en */ | |
242 | #define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 | |
243 | #define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 | |
244 | /* cf14en */ | |
245 | #define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 | |
246 | #define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 | |
247 | /* cf15en */ | |
248 | #define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 | |
249 | #define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 | |
250 | /* cf16en */ | |
251 | #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 | |
252 | #define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 | |
253 | /* cf_array_cf_en */ | |
254 | #define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 | |
255 | #define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 | |
256 | u8 flags10; | |
257 | /* cf18en */ | |
258 | #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 | |
259 | #define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 | |
260 | /* cf19en */ | |
261 | #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 | |
262 | #define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 | |
263 | /* cf20en */ | |
264 | #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 | |
265 | #define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 | |
266 | /* cf21en */ | |
267 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 | |
268 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 | |
269 | /* cf22en */ | |
270 | #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 | |
271 | #define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 | |
272 | /* cf23en */ | |
273 | #define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 | |
274 | #define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 | |
275 | /* rule0en */ | |
276 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 | |
277 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 | |
278 | /* rule1en */ | |
279 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 | |
280 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 | |
281 | u8 flags11; | |
282 | /* rule2en */ | |
283 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 | |
284 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 | |
285 | /* rule3en */ | |
286 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 | |
287 | #define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 | |
288 | /* rule4en */ | |
289 | #define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 | |
290 | #define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 | |
291 | /* rule5en */ | |
292 | #define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 | |
293 | #define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 | |
294 | /* rule6en */ | |
295 | #define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 | |
296 | #define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 | |
297 | /* rule7en */ | |
298 | #define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 | |
299 | #define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 | |
300 | /* rule8en */ | |
301 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 | |
302 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 | |
303 | /* rule9en */ | |
304 | #define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 | |
305 | #define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 | |
306 | u8 flags12; | |
307 | /* rule10en */ | |
308 | #define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 | |
309 | #define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 | |
310 | /* rule11en */ | |
311 | #define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 | |
312 | #define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 | |
313 | /* rule12en */ | |
314 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 | |
315 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 | |
316 | /* rule13en */ | |
317 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 | |
318 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 | |
319 | /* rule14en */ | |
320 | #define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 | |
321 | #define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 | |
322 | /* rule15en */ | |
323 | #define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 | |
324 | #define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 | |
325 | /* rule16en */ | |
326 | #define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 | |
327 | #define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 | |
328 | /* rule17en */ | |
329 | #define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 | |
330 | #define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 | |
331 | u8 flags13; | |
332 | /* rule18en */ | |
333 | #define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 | |
334 | #define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 | |
335 | /* rule19en */ | |
336 | #define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 | |
337 | #define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 | |
338 | /* rule20en */ | |
339 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 | |
340 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 | |
341 | /* rule21en */ | |
342 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 | |
343 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 | |
344 | /* rule22en */ | |
345 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 | |
346 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 | |
347 | /* rule23en */ | |
348 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 | |
349 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 | |
350 | /* rule24en */ | |
351 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 | |
352 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 | |
353 | /* rule25en */ | |
354 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 | |
355 | #define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 | |
356 | u8 flags14; | |
357 | /* bit16 */ | |
358 | #define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 | |
359 | #define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 | |
360 | /* bit17 */ | |
361 | #define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 | |
362 | #define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 | |
363 | /* bit18 */ | |
364 | #define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 | |
365 | #define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 | |
366 | /* bit19 */ | |
367 | #define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 | |
368 | #define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 | |
369 | /* bit20 */ | |
370 | #define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 | |
371 | #define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 | |
372 | /* bit21 */ | |
373 | #define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 | |
374 | #define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 | |
375 | /* cf23 */ | |
376 | #define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 | |
377 | #define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 | |
378 | u8 byte2 /* byte2 */; | |
379 | __le16 physical_q0 /* physical_q0 */; | |
380 | __le16 consolid_prod /* physical_q1 */; | |
381 | __le16 reserved16 /* physical_q2 */; | |
382 | __le16 tx_bd_cons /* word3 */; | |
383 | __le16 tx_bd_or_spq_prod /* word4 */; | |
384 | __le16 word5 /* word5 */; | |
385 | __le16 conn_dpi /* conn_dpi */; | |
386 | u8 byte3 /* byte3 */; | |
387 | u8 byte4 /* byte4 */; | |
388 | u8 byte5 /* byte5 */; | |
389 | u8 byte6 /* byte6 */; | |
390 | __le32 reg0 /* reg0 */; | |
391 | __le32 reg1 /* reg1 */; | |
392 | __le32 reg2 /* reg2 */; | |
393 | __le32 reg3 /* reg3 */; | |
394 | __le32 reg4 /* reg4 */; | |
395 | __le32 reg5 /* cf_array0 */; | |
396 | __le32 reg6 /* cf_array1 */; | |
397 | __le16 word7 /* word7 */; | |
398 | __le16 word8 /* word8 */; | |
399 | __le16 word9 /* word9 */; | |
400 | __le16 word10 /* word10 */; | |
401 | __le32 reg7 /* reg7 */; | |
402 | __le32 reg8 /* reg8 */; | |
403 | __le32 reg9 /* reg9 */; | |
404 | u8 byte7 /* byte7 */; | |
405 | u8 byte8 /* byte8 */; | |
406 | u8 byte9 /* byte9 */; | |
407 | u8 byte10 /* byte10 */; | |
408 | u8 byte11 /* byte11 */; | |
409 | u8 byte12 /* byte12 */; | |
410 | u8 byte13 /* byte13 */; | |
411 | u8 byte14 /* byte14 */; | |
412 | u8 byte15 /* byte15 */; | |
413 | u8 byte16 /* byte16 */; | |
414 | __le16 word11 /* word11 */; | |
415 | __le32 reg10 /* reg10 */; | |
416 | __le32 reg11 /* reg11 */; | |
417 | __le32 reg12 /* reg12 */; | |
418 | __le32 reg13 /* reg13 */; | |
419 | __le32 reg14 /* reg14 */; | |
420 | __le32 reg15 /* reg15 */; | |
421 | __le32 reg16 /* reg16 */; | |
422 | __le32 reg17 /* reg17 */; | |
423 | __le32 reg18 /* reg18 */; | |
424 | __le32 reg19 /* reg19 */; | |
425 | __le16 word12 /* word12 */; | |
426 | __le16 word13 /* word13 */; | |
427 | __le16 word14 /* word14 */; | |
428 | __le16 word15 /* word15 */; | |
429 | }; | |
430 | ||
431 | struct tstorm_core_conn_ag_ctx { | |
432 | u8 byte0 /* cdu_validation */; | |
433 | u8 byte1 /* state */; | |
434 | u8 flags0; | |
435 | #define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ | |
436 | #define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 | |
437 | #define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ | |
438 | #define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 | |
439 | #define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ | |
440 | #define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 | |
441 | #define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ | |
442 | #define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 | |
443 | #define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ | |
444 | #define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 | |
445 | #define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ | |
446 | #define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 | |
447 | #define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ | |
448 | #define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 | |
449 | u8 flags1; | |
450 | #define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ | |
451 | #define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 | |
452 | #define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ | |
453 | #define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 | |
454 | #define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ | |
455 | #define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 | |
456 | #define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ | |
457 | #define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 | |
458 | u8 flags2; | |
459 | #define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ | |
460 | #define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 | |
461 | #define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ | |
462 | #define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 | |
463 | #define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ | |
464 | #define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 | |
465 | #define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ | |
466 | #define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 | |
467 | u8 flags3; | |
468 | #define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ | |
469 | #define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 | |
470 | #define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ | |
471 | #define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 | |
472 | #define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ | |
473 | #define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 | |
474 | #define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ | |
475 | #define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 | |
476 | #define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ | |
477 | #define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 | |
478 | #define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ | |
479 | #define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 | |
480 | u8 flags4; | |
481 | #define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ | |
482 | #define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 | |
483 | #define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ | |
484 | #define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 | |
485 | #define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ | |
486 | #define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 | |
487 | #define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ | |
488 | #define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 | |
489 | #define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ | |
490 | #define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 | |
491 | #define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ | |
492 | #define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 | |
493 | #define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ | |
494 | #define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 | |
495 | #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ | |
496 | #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 | |
497 | u8 flags5; | |
498 | #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ | |
499 | #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 | |
500 | #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ | |
501 | #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 | |
502 | #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ | |
503 | #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 | |
504 | #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ | |
505 | #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 | |
506 | #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ | |
507 | #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 | |
508 | #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ | |
509 | #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 | |
510 | #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ | |
511 | #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 | |
512 | #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ | |
513 | #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 | |
514 | __le32 reg0 /* reg0 */; | |
515 | __le32 reg1 /* reg1 */; | |
516 | __le32 reg2 /* reg2 */; | |
517 | __le32 reg3 /* reg3 */; | |
518 | __le32 reg4 /* reg4 */; | |
519 | __le32 reg5 /* reg5 */; | |
520 | __le32 reg6 /* reg6 */; | |
521 | __le32 reg7 /* reg7 */; | |
522 | __le32 reg8 /* reg8 */; | |
523 | u8 byte2 /* byte2 */; | |
524 | u8 byte3 /* byte3 */; | |
525 | __le16 word0 /* word0 */; | |
526 | u8 byte4 /* byte4 */; | |
527 | u8 byte5 /* byte5 */; | |
528 | __le16 word1 /* word1 */; | |
529 | __le16 word2 /* conn_dpi */; | |
530 | __le16 word3 /* word3 */; | |
531 | __le32 reg9 /* reg9 */; | |
532 | __le32 reg10 /* reg10 */; | |
533 | }; | |
534 | ||
535 | struct ustorm_core_conn_ag_ctx { | |
536 | u8 reserved /* cdu_validation */; | |
537 | u8 byte1 /* state */; | |
538 | u8 flags0; | |
539 | #define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ | |
540 | #define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 | |
541 | #define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ | |
542 | #define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 | |
543 | #define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ | |
544 | #define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 | |
545 | #define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ | |
546 | #define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 | |
547 | #define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ | |
548 | #define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 | |
549 | u8 flags1; | |
550 | #define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ | |
551 | #define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 | |
552 | #define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ | |
553 | #define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 | |
554 | #define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ | |
555 | #define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 | |
556 | #define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ | |
557 | #define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 | |
558 | u8 flags2; | |
559 | #define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ | |
560 | #define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 | |
561 | #define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ | |
562 | #define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 | |
563 | #define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ | |
564 | #define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 | |
565 | #define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ | |
566 | #define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 | |
567 | #define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ | |
568 | #define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 | |
569 | #define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ | |
570 | #define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 | |
571 | #define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ | |
572 | #define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 | |
573 | #define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ | |
574 | #define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 | |
575 | u8 flags3; | |
576 | #define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ | |
577 | #define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 | |
578 | #define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ | |
579 | #define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 | |
580 | #define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ | |
581 | #define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 | |
582 | #define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ | |
583 | #define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 | |
584 | #define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ | |
585 | #define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 | |
586 | #define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ | |
587 | #define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 | |
588 | #define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ | |
589 | #define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 | |
590 | #define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ | |
591 | #define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 | |
592 | u8 byte2 /* byte2 */; | |
593 | u8 byte3 /* byte3 */; | |
594 | __le16 word0 /* conn_dpi */; | |
595 | __le16 word1 /* word1 */; | |
596 | __le32 rx_producers /* reg0 */; | |
597 | __le32 reg1 /* reg1 */; | |
598 | __le32 reg2 /* reg2 */; | |
599 | __le32 reg3 /* reg3 */; | |
600 | __le16 word2 /* word2 */; | |
601 | __le16 word3 /* word3 */; | |
602 | }; | |
603 | ||
604 | /* | |
605 | * The core storm context for the Mstorm | |
606 | */ | |
607 | struct mstorm_core_conn_st_ctx { | |
608 | __le32 reserved[24]; | |
609 | }; | |
610 | ||
611 | /* | |
612 | * The core storm context for the Ustorm | |
613 | */ | |
614 | struct ustorm_core_conn_st_ctx { | |
615 | __le32 reserved[4]; | |
616 | }; | |
617 | ||
618 | /* | |
619 | * core connection context | |
620 | */ | |
621 | struct core_conn_context { | |
622 | /* ystorm storm context */ | |
623 | struct ystorm_core_conn_st_ctx ystorm_st_context; | |
624 | struct regpair ystorm_st_padding[2] /* padding */; | |
625 | /* pstorm storm context */ | |
626 | struct pstorm_core_conn_st_ctx pstorm_st_context; | |
627 | struct regpair pstorm_st_padding[2] /* padding */; | |
628 | /* xstorm storm context */ | |
629 | struct xstorm_core_conn_st_ctx xstorm_st_context; | |
630 | /* xstorm aggregative context */ | |
631 | struct xstorm_core_conn_ag_ctx xstorm_ag_context; | |
632 | /* tstorm aggregative context */ | |
633 | struct tstorm_core_conn_ag_ctx tstorm_ag_context; | |
634 | /* ustorm aggregative context */ | |
635 | struct ustorm_core_conn_ag_ctx ustorm_ag_context; | |
636 | /* mstorm storm context */ | |
637 | struct mstorm_core_conn_st_ctx mstorm_st_context; | |
638 | /* ustorm storm context */ | |
639 | struct ustorm_core_conn_st_ctx ustorm_st_context; | |
640 | struct regpair ustorm_st_padding[2] /* padding */; | |
641 | }; | |
642 | ||
643 | ||
644 | /* | |
645 | * How ll2 should deal with packet upon errors | |
646 | */ | |
647 | enum core_error_handle { | |
648 | LL2_DROP_PACKET /* If error occurs drop packet */, | |
649 | LL2_DO_NOTHING /* If error occurs do nothing */, | |
650 | LL2_ASSERT /* If error occurs assert */, | |
651 | MAX_CORE_ERROR_HANDLE | |
652 | }; | |
653 | ||
654 | ||
655 | /* | |
656 | * opcodes for the event ring | |
657 | */ | |
658 | enum core_event_opcode { | |
659 | CORE_EVENT_TX_QUEUE_START, | |
660 | CORE_EVENT_TX_QUEUE_STOP, | |
661 | CORE_EVENT_RX_QUEUE_START, | |
662 | CORE_EVENT_RX_QUEUE_STOP, | |
663 | MAX_CORE_EVENT_OPCODE | |
664 | }; | |
665 | ||
666 | ||
667 | /* | |
668 | * The L4 pseudo checksum mode for Core | |
669 | */ | |
670 | enum core_l4_pseudo_checksum_mode { | |
671 | /* Pseudo Checksum on packet is calculated with the correct packet length. */ | |
672 | CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, | |
673 | /* Pseudo Checksum on packet is calculated with zero length. */ | |
674 | CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, | |
675 | MAX_CORE_L4_PSEUDO_CHECKSUM_MODE | |
676 | }; | |
677 | ||
678 | ||
679 | /* | |
680 | * Light-L2 RX Producers in Tstorm RAM | |
681 | */ | |
682 | struct core_ll2_port_stats { | |
683 | struct regpair gsi_invalid_hdr; | |
684 | struct regpair gsi_invalid_pkt_length; | |
685 | struct regpair gsi_unsupported_pkt_typ; | |
686 | struct regpair gsi_crcchksm_error; | |
687 | }; | |
688 | ||
689 | ||
690 | /* | |
691 | * Ethernet TX Per Queue Stats | |
692 | */ | |
693 | struct core_ll2_pstorm_per_queue_stat { | |
694 | /* number of total bytes sent without errors */ | |
695 | struct regpair sent_ucast_bytes; | |
696 | /* number of total bytes sent without errors */ | |
697 | struct regpair sent_mcast_bytes; | |
698 | /* number of total bytes sent without errors */ | |
699 | struct regpair sent_bcast_bytes; | |
700 | /* number of total packets sent without errors */ | |
701 | struct regpair sent_ucast_pkts; | |
702 | /* number of total packets sent without errors */ | |
703 | struct regpair sent_mcast_pkts; | |
704 | /* number of total packets sent without errors */ | |
705 | struct regpair sent_bcast_pkts; | |
706 | }; | |
707 | ||
708 | ||
709 | /* | |
710 | * Light-L2 RX Producers in Tstorm RAM | |
711 | */ | |
712 | struct core_ll2_rx_prod { | |
713 | __le16 bd_prod /* BD Producer */; | |
714 | __le16 cqe_prod /* CQE Producer */; | |
715 | __le32 reserved; | |
716 | }; | |
717 | ||
718 | ||
719 | struct core_ll2_tstorm_per_queue_stat { | |
720 | /* Number of packets discarded because they are bigger than MTU */ | |
721 | struct regpair packet_too_big_discard; | |
722 | /* Number of packets discarded due to lack of host buffers */ | |
723 | struct regpair no_buff_discard; | |
724 | }; | |
725 | ||
726 | ||
/*
 * LL2 RX per-queue statistics (Ustorm)
 */
struct core_ll2_ustorm_per_queue_stat {
	struct regpair rcv_ucast_bytes /* unicast bytes received */;
	struct regpair rcv_mcast_bytes /* multicast bytes received */;
	struct regpair rcv_bcast_bytes /* broadcast bytes received */;
	struct regpair rcv_ucast_pkts /* unicast packets received */;
	struct regpair rcv_mcast_pkts /* multicast packets received */;
	struct regpair rcv_bcast_pkts /* broadcast packets received */;
};
735 | ||
736 | ||
/*
 * Core Ramrod Command IDs (light L2)
 */
enum core_ramrod_cmd_id {
	CORE_RAMROD_UNUSED,
	CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
	CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
	CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
	MAX_CORE_RAMROD_CMD_ID
};
748 | ||
749 | ||
/*
 * RoCE flavor of a core (LL2) connection.
 * NOTE(review): original header said "Core RX CQE Type for Light L2" -
 * copy-paste error; these values select the RoCE variant.
 */
enum core_roce_flavor_type {
	CORE_ROCE /* plain RoCE */,
	CORE_RROCE /* presumably Routable RoCE (RoCEv2) - confirm with HSI spec */,
	MAX_CORE_ROCE_FLAVOR_TYPE
};
758 | ||
759 | ||
/*
 * Specifies how ll2 should deal with packet errors: packet_too_big and no_buff
 */
struct core_rx_action_on_error {
	u8 error_type; /* bit-field; decode with the MASK/SHIFT pairs below */
/* ll2 how to handle error packet_too_big (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
/* ll2 how to handle error with no_buff (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
};
774 | ||
775 | ||
/*
 * Core RX BD for Light L2 (fixed/static buffer size variant)
 */
struct core_rx_bd {
	struct regpair addr /* host buffer address */;
	__le16 reserved[4];
};
783 | ||
784 | ||
/*
 * Core RX BD for Light L2 carrying an explicit buffer length
 */
struct core_rx_bd_with_buff_len {
	struct regpair addr /* host buffer address */;
	__le16 buff_length /* length of the buffer pointed to by addr */;
	__le16 reserved[3];
};
793 | ||
/*
 * Union of the two Core RX BD layouts for Light L2
 */
union core_rx_bd_union {
	struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
	/* Core Rx Bd with dynamic buffer length */
	struct core_rx_bd_with_buff_len rx_bd_with_len;
};
802 | ||
803 | ||
804 | ||
/*
 * Opaque Data for Light L2 RX CQE
 */
struct core_rx_cqe_opaque_data {
	__le32 data[2] /* Opaque CQE Data, passed through unmodified */;
};
811 | ||
812 | ||
/*
 * Core RX CQE Type for Light L2
 */
enum core_rx_cqe_type {
	/* Bad RX Cqe type. NOTE(review): "ILLIGAL" is misspelled but is part of
	 * the established API name - do not rename without updating all callers.
	 */
	CORE_RX_CQE_ILLIGAL_TYPE,
	CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
	CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
	CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
	MAX_CORE_RX_CQE_TYPE
};
823 | ||
824 | ||
/*
 * Core RX fast-path CQE for Light L2
 */
struct core_rx_fast_path_cqe {
	u8 type /* CQE type (see enum core_rx_cqe_type) */;
	/* Offset (in bytes) of the packet from start of the buffer */
	u8 placement_offset;
	/* Parsing and error flags from the parser */
	struct parsing_and_err_flags parse_flags;
	__le16 packet_length /* Total packet length (from the parser) */;
	__le16 vlan /* 802.1q VLAN tag */;
	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
	__le32 reserved[4];
};
839 | ||
/*
 * Core RX CQE for a GSI-offloaded (RoCE CM) packet
 */
struct core_rx_gsi_offload_cqe {
	u8 type /* CQE type (see enum core_rx_cqe_type) */;
	u8 data_length_error /* set if gsi data is bigger than buff */;
	/* Parsing and error flags from the parser */
	struct parsing_and_err_flags parse_flags;
	__le16 data_length /* Total packet length (from the parser) */;
	__le16 vlan /* 802.1q VLAN tag */;
	__le32 src_mac_addrhi /* hi 4 bytes of source mac address */;
	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
	u8 reserved1[2];
	__le32 gid_dst[4] /* Gid destination address */;
};
855 | ||
/*
 * Core RX slow-path CQE for Light L2 (ramrod completion)
 */
struct core_rx_slow_path_cqe {
	u8 type /* CQE type (see enum core_rx_cqe_type) */;
	u8 ramrod_cmd_id /* completed ramrod command id */;
	__le16 echo /* echo value from the ramrod header */;
	__le32 reserved1[7];
};
865 | ||
/*
 * Union of the Core RX CQE layouts for Light L2.
 * NOTE(review): original header said "Core RX CM offload BD" - copy-paste
 * error; this is the CQE union, not a BD.
 */
union core_rx_cqe_union {
	struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
	struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
	struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};
874 | ||
875 | ||
876 | ||
877 | ||
878 | ||
/*
 * Ramrod data for rx queue start ramrod
 */
struct core_rx_start_ramrod_data {
	struct regpair bd_base /* bd address of the first bd page */;
	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
	__le16 mtu /* Maximum transmission unit */;
	__le16 sb_id /* Status block ID */;
	u8 sb_index /* index of the protocol index */;
	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
	u8 complete_event_flg /* post completion to the event ring if set */;
	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
	/* if set, 802.1q tags will be removed and copied to CQE */
	u8 inner_vlan_removal_en;
	u8 queue_id /* Light L2 RX Queue ID */;
	u8 main_func_queue /* Is this the main queue for the PF */;
	/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
	 * main_func_queue is set.
	 */
	u8 mf_si_bcast_accept_all;
	/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
	 * main_func_queue is set.
	 */
	u8 mf_si_mcast_accept_all;
	/* Specifies how ll2 should deal with packet errors: packet_too_big and
	 * no_buff
	 */
	struct core_rx_action_on_error action_on_error;
	/* set when in GSI offload mode on ROCE connection */
	u8 gsi_offload_flag;
	u8 reserved[7];
};
912 | ||
913 | ||
/*
 * Ramrod data for rx queue stop ramrod
 */
struct core_rx_stop_ramrod_data {
	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
	u8 complete_event_flg /* post completion to the event ring if set */;
	u8 queue_id /* Light L2 RX Queue ID */;
	u8 reserved1;
	__le16 reserved2[2];
};
924 | ||
925 | ||
/*
 * Flags for Core TX BD (single byte bit-field; decode with MASK/SHIFT pairs)
 */
struct core_tx_bd_flags {
	u8 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK         0x1
#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT        0
/* Insert VLAN into packet */
#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK          0x1
#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT         1
/* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_FLAGS_START_BD_MASK                0x1
#define CORE_TX_BD_FLAGS_START_BD_SHIFT               2
/* Calculate the IP checksum for the packet */
#define CORE_TX_BD_FLAGS_IP_CSUM_MASK                 0x1
#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT                3
/* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_FLAGS_L4_CSUM_MASK                 0x1
#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT                4
/* Packet is IPv6 with extensions */
#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK                0x1
#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT               5
/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
 * 0-TCP, 1-UDP
 */
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK             0x1
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT            6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
 * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
 */
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK     0x1
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT    7
};
960 | ||
/*
 * Core TX BD for Light L2
 */
struct core_tx_bd {
	struct regpair addr /* Buffer Address */;
	__le16 nbytes /* Number of Bytes in Buffer */;
	/* Network packets: VLAN to insert into the packet (if insertion flag is
	 * set). LoopBack packets: echo data to pass to Rx.
	 */
	__le16 nw_vlan_or_lb_echo;
	u8 bitfield0;
/* Number of BDs that make up one packet - width wide enough to present
 * X_CORE_LL2_NUM_OF_BDS_ON_ST_CT
 */
#define CORE_TX_BD_NBDS_MASK             0xF
#define CORE_TX_BD_NBDS_SHIFT            0
/* Use roce_flavor enum - differentiate between RoCE flavors; valid when
 * connType is ROCE (use enum core_roce_flavor_type)
 */
#define CORE_TX_BD_ROCE_FLAV_MASK        0x1
#define CORE_TX_BD_ROCE_FLAV_SHIFT       4
#define CORE_TX_BD_RESERVED0_MASK        0x7
#define CORE_TX_BD_RESERVED0_SHIFT       5
	struct core_tx_bd_flags bd_flags /* BD Flags */;
	__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
/* Packet destination - Network, LB (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_MASK           0x1
#define CORE_TX_BD_TX_DST_SHIFT          14
#define CORE_TX_BD_RESERVED1_MASK        0x1
#define CORE_TX_BD_RESERVED1_SHIFT       15
};
994 | ||
995 | ||
996 | ||
/*
 * Light L2 TX Destination
 */
enum core_tx_dest {
	CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	MAX_CORE_TX_DEST
};
1005 | ||
1006 | ||
/*
 * Ramrod data for tx queue start ramrod
 */
struct core_tx_start_ramrod_data {
	struct regpair pbl_base_addr /* Address of the pbl page */;
	__le16 mtu /* Maximum transmission unit */;
	__le16 sb_id /* Status block ID */;
	u8 sb_index /* Status block protocol index */;
	u8 stats_en /* Statistics Enable */;
	u8 stats_id /* Statistics Counter ID */;
	u8 conn_type /* connection type that loaded ll2 */;
	__le16 pbl_size /* Number of BD pages pointed by PBL */;
	__le16 qm_pq_id /* QM PQ ID */;
	/* set when in GSI offload mode on ROCE connection */
	u8 gsi_offload_flag;
	/* NOTE(review): "resrved" is a misspelling of "reserved", but the name
	 * is part of the HSI ABI - do not rename without a coordinated change.
	 */
	u8 resrved[3];
};
1024 | ||
1025 | ||
/*
 * Ramrod data for tx queue stop ramrod (no parameters; padding only)
 */
struct core_tx_stop_ramrod_data {
	__le32 reserved0[2];
};
1032 | ||
1033 | ||
/*
 * Enum flag for what type of dcb data to update.
 * NOTE(review): "dhcp" in the enum name and comments appears to mean DSCP
 * (the values reference l3 DSCP) - confirm against the HSI spec before
 * relying on the spelling.
 */
enum dcb_dhcp_update_flag {
	/* use when no change should be done to dcb data */
	DONT_UPDATE_DCB_DHCP,
	UPDATE_DCB /* use to update only l2 (vlan) priority */,
	UPDATE_DSCP /* use to update only l3 dscp */,
	UPDATE_DCB_DSCP /* update vlan pri and dscp */,
	MAX_DCB_DHCP_UPDATE_FLAG
};
1045 | ||
1046 | ||
/*
 * Ethernet per-PF discard statistics (Mstorm)
 */
struct eth_mstorm_per_pf_stat {
	struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
	struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
	struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
	struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
};
1053 | ||
1054 | ||
/*
 * Ethernet per-queue discard and TPA statistics (Mstorm)
 */
struct eth_mstorm_per_queue_stat {
	/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
	struct regpair ttl0_discard;
	/* Number of packets discarded because they are bigger than MTU */
	struct regpair packet_too_big_discard;
	/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
	struct regpair no_buff_discard;
	/* Number of packets discarded because of no active Rx connection */
	struct regpair not_active_discard;
	/* number of coalesced packets in all TPA aggregations */
	struct regpair tpa_coalesced_pkts;
	/* total number of TPA aggregations */
	struct regpair tpa_coalesced_events;
	/* number of aggregations, which abnormally ended */
	struct regpair tpa_aborts_num;
	/* total TCP payload length in all TPA aggregations */
	struct regpair tpa_coalesced_bytes;
};
1073 | ||
1074 | ||
/*
 * Ethernet TX per-PF statistics (Pstorm)
 */
struct eth_pstorm_per_pf_stat {
	/* number of total ucast bytes sent on loopback port without errors */
	struct regpair sent_lb_ucast_bytes;
	/* number of total mcast bytes sent on loopback port without errors */
	struct regpair sent_lb_mcast_bytes;
	/* number of total bcast bytes sent on loopback port without errors */
	struct regpair sent_lb_bcast_bytes;
	/* number of total ucast packets sent on loopback port without errors */
	struct regpair sent_lb_ucast_pkts;
	/* number of total mcast packets sent on loopback port without errors */
	struct regpair sent_lb_mcast_pkts;
	/* number of total bcast packets sent on loopback port without errors */
	struct regpair sent_lb_bcast_pkts;
	struct regpair sent_gre_bytes /* Sent GRE bytes */;
	struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
	struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
	struct regpair sent_gre_pkts /* Sent GRE packets */;
	struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
	struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
	struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
	struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
	struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
};
1101 | ||
1102 | ||
/*
 * Ethernet TX Per Queue Stats (Pstorm)
 */
struct eth_pstorm_per_queue_stat {
	/* number of total unicast bytes sent without errors */
	struct regpair sent_ucast_bytes;
	/* number of total multicast bytes sent without errors */
	struct regpair sent_mcast_bytes;
	/* number of total broadcast bytes sent without errors */
	struct regpair sent_bcast_bytes;
	/* number of total unicast packets sent without errors */
	struct regpair sent_ucast_pkts;
	/* number of total multicast packets sent without errors */
	struct regpair sent_mcast_pkts;
	/* number of total broadcast packets sent without errors */
	struct regpair sent_bcast_pkts;
	/* number of total packets dropped due to errors */
	struct regpair error_drop_pkts;
};
1122 | ||
1123 | ||
/*
 * ETH RX rate-limit parameters.
 * NOTE(review): original header said "ETH Rx producers data" - copy-paste
 * error; the fields below describe a rate limiter.
 */
struct eth_rx_rate_limit {
	/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
	__le16 mult;
	/* Constant term to add (or subtract from number of cycles) */
	__le16 cnst;
	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
	u8 reserved0;
	__le16 reserved1;
};
1136 | ||
1137 | ||
/*
 * Ethernet RX per-PF statistics (Ustorm)
 */
struct eth_ustorm_per_pf_stat {
	/* number of total ucast bytes received on loopback port without errors */
	struct regpair rcv_lb_ucast_bytes;
	/* number of total mcast bytes received on loopback port without errors */
	struct regpair rcv_lb_mcast_bytes;
	/* number of total bcast bytes received on loopback port without errors */
	struct regpair rcv_lb_bcast_bytes;
	/* number of total ucast packets received on loopback port without errors */
	struct regpair rcv_lb_ucast_pkts;
	/* number of total mcast packets received on loopback port without errors */
	struct regpair rcv_lb_mcast_pkts;
	/* number of total bcast packets received on loopback port without errors */
	struct regpair rcv_lb_bcast_pkts;
	struct regpair rcv_gre_bytes /* Received GRE bytes */;
	struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
	struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
	struct regpair rcv_gre_pkts /* Received GRE packets */;
	struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
	struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
};
1158 | ||
1159 | ||
/*
 * Ethernet RX per-queue statistics (Ustorm)
 */
struct eth_ustorm_per_queue_stat {
	struct regpair rcv_ucast_bytes /* unicast bytes received */;
	struct regpair rcv_mcast_bytes /* multicast bytes received */;
	struct regpair rcv_bcast_bytes /* broadcast bytes received */;
	struct regpair rcv_ucast_pkts /* unicast packets received */;
	struct regpair rcv_mcast_pkts /* multicast packets received */;
	struct regpair rcv_bcast_pkts /* broadcast packets received */;
};
1168 | ||
1169 | ||
/*
 * Event Ring Next Page Address
 */
struct event_ring_next_addr {
	struct regpair addr /* Next Page Address */;
	__le32 reserved[2] /* Reserved */;
};
1177 | ||
/*
 * Event Ring Element: either a regular entry or a next-page pointer
 */
union event_ring_element {
	struct event_ring_entry entry /* Event Ring Entry */;
	/* Event Ring Next Page Address */
	struct event_ring_next_addr next_addr;
};
1186 | ||
1187 | ||
1188 | ||
/*
 * FW flow-control mode.
 * NOTE(review): original header said "Ports mode" - copy-paste error;
 * the values select link-level pause vs. priority flow control.
 */
enum fw_flow_ctrl_mode {
	flow_ctrl_pause /* IEEE 802.3x link-level pause */,
	flow_ctrl_pfc /* priority flow control */,
	MAX_FW_FLOW_CTRL_MODE
};
1197 | ||
1198 | ||
/*
 * Major and Minor hsi Versions, per protocol
 * (index with enum protocol_version_array_key)
 */
struct hsi_fp_ver_struct {
	u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
	u8 major_ver_arr[2] /* Major Version of driver loading pf */;
};
1206 | ||
1207 | ||
/*
 * Integration Phase
 */
enum integ_phase {
	INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
	INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
	INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
	MAX_INTEG_PHASE
};
1217 | ||
1218 | ||
/*
 * iWARP LL2 TX queue identifiers.
 * NOTE(review): original header said "Ports mode" - copy-paste error.
 */
enum iwarp_ll2_tx_queues {
	/* LL2 queue for OOO packets sent in-order by the driver */
	IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
	/* LL2 queue for unaligned packets sent aligned by the driver */
	IWARP_LL2_ALIGNED_TX_QUEUE,
	IWARP_LL2_ERROR /* Error indication */,
	MAX_IWARP_LL2_TX_QUEUES
};
1230 | ||
1231 | ||
/*
 * Malicious VF error ID
 */
enum malicious_vf_error_id {
	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
	/* Writing to VF/PF channel when it is not ready */
	VF_PF_CHANNEL_NOT_READY,
	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
	/* TX packet is shorter than reported on BDs or than the minimal size */
	ETH_PACKET_TOO_SMALL,
	/* TX packet marked as insert-VLAN when that is illegal */
	ETH_ILLEGAL_VLAN_MODE,
	ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
	/* TX packet has illegal inband tags marked */
	ETH_ILLEGAL_INBAND_TAGS,
	/* VLAN cannot be added to an inband tag */
	ETH_VLAN_INSERT_AND_INBAND_VLAN,
	/* indicated number of BDs for the packet is illegal */
	ETH_ILLEGAL_NBDS,
	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
	/* There are not enough BDs for transmission of even one packet */
	ETH_INSUFFICIENT_BDS,
	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
	/* empty BD (which contains no control flags) is illegal */
	ETH_ZERO_SIZE_BD,
	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
	/* In LSO it is expected that on the local BD ring there will be at least
	 * MSS bytes of data
	 */
	ETH_INSUFFICIENT_PAYLOAD,
	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
	/* Tunneled packet with IPv6+Ext without a proper number of BDs */
	ETH_TUNN_IPV6_EXT_NBD_ERR,
	ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
	MAX_MALICIOUS_VF_ERROR_ID
};
1270 | ||
1271 | ||
1272 | ||
/*
 * Mstorm non-triggering VF zone
 */
struct mstorm_non_trigger_vf_zone {
	/* VF statistic bucket */
	struct eth_mstorm_per_queue_stat eth_queue_stat;
	/* VF RX queues producers */
	struct eth_rx_prod_data
		eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
1283 | ||
1284 | ||
/*
 * Mstorm VF zone
 */
struct mstorm_vf_zone {
	/* non-interrupt-triggering zone */
	struct mstorm_non_trigger_vf_zone non_trigger;
};
1292 | ||
1293 | ||
/*
 * Personality (protocol role) per PF
 */
enum personality_type {
	BAD_PERSONALITY_TYP,
	PERSONALITY_ISCSI /* iSCSI and LL2 */,
	PERSONALITY_FCOE /* Fcoe and LL2 */,
	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
	PERSONALITY_RDMA /* Roce and LL2 */,
	PERSONALITY_CORE /* CORE(LL2) */,
	PERSONALITY_ETH /* Ethernet */,
	PERSONALITY_TOE /* Toe and LL2 */,
	MAX_PERSONALITY_TYPE
};
1308 | ||
1309 | ||
/*
 * Tunnel configuration supplied with the PF start ramrod
 */
struct pf_start_tunnel_config {
	/* Set VXLAN tunnel UDP destination port. */
	u8 set_vxlan_udp_port_flg;
	/* Set GENEVE tunnel UDP destination port. */
	u8 set_geneve_udp_port_flg;
	u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
	/* If set, enable l2 GENEVE tunnel in TX path. */
	u8 tx_enable_l2geneve;
	/* If set, enable IP GENEVE tunnel in TX path. */
	u8 tx_enable_ipgeneve;
	u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
	u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
	/* Classification scheme for l2 GENEVE tunnel. */
	u8 tunnel_clss_l2geneve;
	/* Classification scheme for ip GENEVE tunnel. */
	u8 tunnel_clss_ipgeneve;
	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
};
1335 | ||
/*
 * Ramrod data for PF start ramrod
 */
struct pf_start_ramrod_data {
	struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
	/* PBL address of consolidation queue */
	struct regpair consolid_q_pbl_addr;
	/* tunnel configuration. */
	struct pf_start_tunnel_config tunnel_config;
	__le16 event_ring_sb_id /* Status block ID */;
	/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
	u8 base_vf_id;
	u8 num_vfs /* Amount of vfs owned by PF */;
	u8 event_ring_num_pages /* Number of PBL pages in event ring */;
	u8 event_ring_sb_index /* Status block index */;
	u8 path_id /* HW path ID (engine ID) */;
	u8 warning_as_error /* In FW asserts, treat warning as error */;
	/* If not set - throw a warning for each ramrod (for debug) */
	u8 dont_log_ramrods;
	u8 personality /* what type of personality the new PF has
			* (use enum personality_type)
			*/;
	/* Log type mask. Each bit set enables a corresponding event type logging.
	 * Event types are defined as ASSERT_LOG_TYPE_xxx
	 */
	__le16 log_type_mask;
	u8 mf_mode /* Multi function mode */;
	u8 integ_phase /* Integration phase (use enum integ_phase) */;
	/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
	u8 allow_npar_tx_switching;
	/* Map from inner to outer priority. Set pri_map_valid when init map */
	u8 inner_to_outer_pri_map[8];
	/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
	u8 pri_map_valid;
	/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
	 * (lower 16 bits) and ethType to use (higher 16 bits)
	 */
	__le32 outer_tag;
	/* FP HSI version to be used by FW */
	struct hsi_fp_ver_struct hsi_fp_ver;
};
1375 | ||
1376 | ||
1377 | ||
/*
 * Per-protocol DCB/DSCP parameters.
 * NOTE(review): original header said "Data for port update ramrod" - this
 * struct is embedded per-protocol inside pf_update_ramrod_data.
 */
struct protocol_dcb_data {
	u8 dcb_enable_flag /* dcbEnable flag value */;
	u8 dscp_enable_flag /* If set use dscp value */;
	u8 dcb_priority /* dcbPri flag value */;
	u8 dcb_tc /* dcb TC value */;
	u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
	u8 reserved0;
};
1389 | ||
/*
 * Update tunnel configuration (carried by the PF update ramrod)
 */
struct pf_update_tunnel_config {
	/* Update RX per PF tunnel classification scheme. */
	u8 update_rx_pf_clss;
	/* Update per PORT default tunnel RX classification scheme for traffic with
	 * unknown unicast outer MAC in NPAR mode.
	 */
	u8 update_rx_def_ucast_clss;
	/* Update per PORT default tunnel RX classification scheme for traffic with non
	 * unicast outer MAC in NPAR mode.
	 */
	u8 update_rx_def_non_ucast_clss;
	/* Update TX per PF tunnel classification scheme. used by pf update. */
	u8 update_tx_pf_clss;
	/* Update VXLAN tunnel UDP destination port. */
	u8 set_vxlan_udp_port_flg;
	/* Update GENEVE tunnel UDP destination port. */
	u8 set_geneve_udp_port_flg;
	u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
	/* If set, enable l2 GENEVE tunnel in TX path. */
	u8 tx_enable_l2geneve;
	/* If set, enable IP GENEVE tunnel in TX path. */
	u8 tx_enable_ipgeneve;
	u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
	u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
	/* Classification scheme for l2 GENEVE tunnel. */
	u8 tunnel_clss_l2geneve;
	/* Classification scheme for ip GENEVE tunnel. */
	u8 tunnel_clss_ipgeneve;
	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
	__le16 reserved[2];
};
1428 | ||
/*
 * Data for PF update ramrod
 */
struct pf_update_ramrod_data {
	u8 pf_id;
	u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
	u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
	u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
	u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
	/* Update RROCE (RoceV2) DCB data indication */
	u8 update_rroce_dcb_data_flag;
	u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
	/* core iscsi related fields */
	struct protocol_dcb_data iscsi_dcb_data;
	struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
	/* core rroce (RoceV2) related fields */
	struct protocol_dcb_data rroce_dcb_data;
	/* core iwarp related fields */
	struct protocol_dcb_data iwarp_dcb_data;
	__le16 mf_vlan /* new outer vlan id value */;
	__le16 reserved;
	/* tunnel configuration. */
	struct pf_update_tunnel_config tunnel_config;
};
1456 | ||
1457 | ||
1458 | ||
/*
 * Ports mode (engine x port combinations)
 */
enum ports_mode {
	ENGX2_PORTX1 /* 2 engines x 1 port */,
	ENGX2_PORTX2 /* 2 engines x 2 ports */,
	ENGX1_PORTX1 /* 1 engine x 1 port */,
	ENGX1_PORTX2 /* 1 engine x 2 ports */,
	ENGX1_PORTX4 /* 1 engine x 4 ports */,
	MAX_PORTS_MODE
};
1470 | ||
1471 | ||
1472 | ||
/*
 * Index into hsi_fp_ver_struct's minor/major version arrays, per protocol
 */
enum protocol_version_array_key {
	ETH_VER_KEY = 0,
	ROCE_VER_KEY,
	MAX_PROTOCOL_VERSION_ARRAY_KEY
};
1481 | ||
1482 | ||
1483 | ||
/*
 * RDMA TX Stats
 */
struct rdma_sent_stats {
	struct regpair sent_bytes /* number of total RDMA bytes sent */;
	struct regpair sent_pkts /* number of total RDMA packets sent */;
};
1491 | ||
/*
 * Pstorm non-triggering VF zone
 */
struct pstorm_non_trigger_vf_zone {
	/* VF statistic bucket */
	struct eth_pstorm_per_queue_stat eth_queue_stat;
	struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
};
1500 | ||
1501 | ||
/*
 * Pstorm VF zone
 */
struct pstorm_vf_zone {
	/* non-interrupt-triggering zone */
	struct pstorm_non_trigger_vf_zone non_trigger;
	struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
};
1510 | ||
1511 | ||
/*
 * Ramrod Header of SPQE
 */
struct ramrod_header {
	__le32 cid /* Slowpath Connection CID */;
	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
	u8 protocol_id /* Ramrod Protocol ID */;
	__le16 echo /* Ramrod echo, returned in the completion CQE */;
};
1521 | ||
1522 | ||
/*
 * RDMA RX Stats
 */
struct rdma_rcv_stats {
	struct regpair rcv_bytes /* number of total RDMA bytes received */;
	struct regpair rcv_pkts /* number of total RDMA packets received */;
};
1530 | ||
1531 | ||
1532 | ||
/*
 * Data for update QCN/DCQCN RL (rate-limiter) ramrod
 */
struct rl_update_ramrod_data {
	u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
	/* Update DCQCN global params: timeout, g, k. */
	u8 dcqcn_update_param_flg;
	u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
	u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
	u8 rl_stop_flg /* Stop RL. */;
	u8 rl_id_first /* ID of first or single RL that will be updated. */;
	/* ID of last RL that will be updated. If clear, a single RL will be
	 * updated.
	 */
	u8 rl_id_last;
	u8 rl_dc_qcn_flg /* If set, RL will be used for DCQCN. */;
	__le32 rl_bc_rate /* Byte Counter Limit. */;
	__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
	__le16 rl_r_ai /* Active increase rate. */;
	__le16 rl_r_hai /* Hyper active increase rate. */;
	__le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution. */;
	__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
	/* DCQCN timeout (field-name typo preserved: part of the HSI ABI). */
	__le32 dcqcn_timeuot_us;
	/* QCN timeout (field-name typo preserved: part of the HSI ABI). */
	__le32 qcn_timeuot_us;
	__le32 reserved[2];
};
1557 | ||
1558 | ||
/*
 * Slowpath Element (SPQE)
 */
struct slow_path_element {
	struct ramrod_header hdr /* Ramrod Header */;
	struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
};
1566 | ||
1567 | ||
/*
 * Tstorm non-triggering VF zone
 */
struct tstorm_non_trigger_vf_zone {
	struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
};
1574 | ||
1575 | ||
/* Per-port drop / irregular-packet counters collected by the Tstorm. */
struct tstorm_per_port_stat {
	/* packet is dropped because it was truncated in NIG */
	struct regpair trunc_error_discard;
	/* packet is dropped because of Ethernet FCS error */
	struct regpair mac_error_discard;
	/* packet is dropped because classification was unsuccessful */
	struct regpair mftag_filter_discard;
	/* packet was passed to Ethernet and dropped because of no mac filter match */
	struct regpair eth_mac_filter_discard;
	/* packet passed to Light L2 and dropped because Light L2 is not configured
	 * for this PF
	 */
	struct regpair ll2_mac_filter_discard;
	/* packet passed to Light L2 and dropped because Light L2 is not configured
	 * for this PF
	 */
	struct regpair ll2_conn_disabled_discard;
	/* packet is an ISCSI irregular packet */
	struct regpair iscsi_irregular_pkt;
	/* packet is an FCOE irregular packet */
	struct regpair fcoe_irregular_pkt;
	/* packet is an ROCE irregular packet */
	struct regpair roce_irregular_pkt;
	/* packet is an ETH irregular packet */
	struct regpair eth_irregular_pkt;
	/* packet is an TOE irregular packet */
	struct regpair toe_irregular_pkt;
	/* packet is an PREROCE irregular packet */
	struct regpair preroce_irregular_pkt;
	struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
	/* VXLAN dropped packets */
	struct regpair eth_vxlan_tunn_filter_discard;
	/* GENEVE dropped packets */
	struct regpair eth_geneve_tunn_filter_discard;
};
1611 | ||
1612 | ||
/*
 * Tstorm VF zone
 */
struct tstorm_vf_zone {
	/* non-interrupt-triggering zone */
	struct tstorm_non_trigger_vf_zone non_trigger;
};
1620 | ||
1621 | ||
/*
 * Tunnel classification scheme: selects which L2 header / VNI is used to
 * classify a tunneled packet to a vport.
 */
enum tunnel_clss {
	/* Use MAC and VLAN from first (outer) L2 header for vport classification. */
	TUNNEL_CLSS_MAC_VLAN = 0,
	/* Use MAC from first L2 header and VNI from tunnel header for vport
	 * classification
	 */
	TUNNEL_CLSS_MAC_VNI,
	/* Use MAC and VLAN from last (inner) L2 header for vport classification */
	TUNNEL_CLSS_INNER_MAC_VLAN,
	/* Use MAC from last L2 header and VNI from tunnel header for vport
	 * classification
	 */
	TUNNEL_CLSS_INNER_MAC_VNI,
	/* Use MAC and VLAN from last L2 header for vport classification. If no
	 * exact match, use MAC and VLAN from first L2 header for classification.
	 */
	TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
	MAX_TUNNEL_CLSS
};
1644 | ||
1645 | ||
1646 | ||
/*
 * Ustorm non-triggering VF zone
 */
struct ustorm_non_trigger_vf_zone {
	/* VF statistic bucket */
	struct eth_ustorm_per_queue_stat eth_queue_stat;
	struct regpair vf_pf_msg_addr /* VF-PF message address */;
};
1655 | ||
1656 | ||
/*
 * Ustorm triggering VF zone (writes here raise an interrupt)
 */
struct ustorm_trigger_vf_zone {
	u8 vf_pf_msg_valid /* VF-PF message valid flag */;
	u8 reserved[7];
};
1664 | ||
1665 | ||
/*
 * Ustorm VF zone
 */
struct ustorm_vf_zone {
	/* non-interrupt-triggering zone */
	struct ustorm_non_trigger_vf_zone non_trigger;
	struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
};
1674 | ||
1675 | ||
/*
 * VF-PF channel data
 */
struct vf_pf_channel_data {
	/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF
	 * Channel is ready for a new transaction.
	 */
	__le32 ready;
	/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel
	 * is valid.
	 */
	u8 valid;
	u8 reserved0;
	__le16 reserved1;
};
1691 | ||
1692 | ||
/*
 * Ramrod data for VF start ramrod
 */
struct vf_start_ramrod_data {
	u8 vf_id /* VF ID */;
	/* If set, initial cleanup ack will be sent to parent PF SP event queue */
	u8 enable_flr_ack;
	__le16 opaque_fid /* VF opaque FID */;
	u8 personality /* defines what type of personality the new VF has */;
	u8 reserved[7];
	/* FP HSI version to be used by FW */
	struct hsi_fp_ver_struct hsi_fp_ver;
};
1706 | ||
1707 | ||
/*
 * Ramrod data for VF stop ramrod
 */
struct vf_stop_ramrod_data {
	u8 vf_id /* VF ID */;
	u8 reserved0;
	__le16 reserved1;
	__le32 reserved2;
};
1717 | ||
1718 | ||
/*
 * VF zone size mode.
 */
enum vf_zone_size_mode {
	/* Default VF zone size. Up to 192 VF supported. */
	VF_ZONE_SIZE_MODE_DEFAULT,
	/* Doubled VF zone size. Up to 96 VF supported. */
	VF_ZONE_SIZE_MODE_DOUBLE,
	/* Quad VF zone size. Up to 48 VF supported. */
	VF_ZONE_SIZE_MODE_QUAD,
	MAX_VF_ZONE_SIZE_MODE
};
1731 | ||
1732 | ||
1733 | ||
1734 | ||
/*
 * Attentions status block
 */
struct atten_status_block {
	__le32 atten_bits /* pending attention bits */;
	__le32 atten_ack /* acknowledged attention bits */;
	__le16 reserved0;
	__le16 sb_index /* status block running index */;
	__le32 reserved1;
};
1745 | ||
1746 | ||
/*
 * IGU cleanup bit values to distinguish between a cleanup command and a
 * producer/consumer update.
 */
enum command_type_bit {
	IGU_COMMAND_TYPE_NOP = 0,
	IGU_COMMAND_TYPE_SET = 1,
	MAX_COMMAND_TYPE_BIT
};
1756 | ||
1757 | ||
/*
 * DMAE command
 */
struct dmae_cmd {
	__le32 opcode;
/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
#define DMAE_CMD_SRC_MASK              0x1
#define DMAE_CMD_SRC_SHIFT             0
/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
 * (use enum dmae_cmd_dst_enum)
 */
#define DMAE_CMD_DST_MASK              0x3
#define DMAE_CMD_DST_SHIFT             1
/* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
#define DMAE_CMD_C_DST_MASK            0x1
#define DMAE_CMD_C_DST_SHIFT           3
/* Reset the CRC result (do not use the previous result as the seed) */
#define DMAE_CMD_CRC_RESET_MASK        0x1
#define DMAE_CMD_CRC_RESET_SHIFT       4
/* Reset the source address in the next go to the same source address of the
 * previous go
 */
#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
/* Reset the destination address in the next go to the same destination address
 * of the previous go
 */
#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
/* 0 - completion function is the same as src function, 1 - completion
 * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
 */
#define DMAE_CMD_COMP_FUNC_MASK        0x1
#define DMAE_CMD_COMP_FUNC_SHIFT       7
/* 0 - Do not write a completion word, 1 - Write a completion word
 * (use enum dmae_cmd_comp_word_en_enum)
 */
#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
/* 0 - Do not write a CRC word, 1 - Write a CRC word
 * (use enum dmae_cmd_comp_crc_en_enum)
 */
#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
/* The CRC word should be taken from the DMAE address space from address 9+X,
 * where X is the value in these bits.
 */
#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
#define DMAE_CMD_RESERVED1_MASK        0x1
#define DMAE_CMD_RESERVED1_SHIFT       13
#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
/* The field specifies how the completion word is affected by PCIe read error.
 * 0 - Send a regular completion, 1 - Send a completion with an error
 * indication, 2 - do not send a completion
 * (use enum dmae_cmd_error_handling_enum)
 */
#define DMAE_CMD_ERR_HANDLING_MASK     0x3
#define DMAE_CMD_ERR_HANDLING_SHIFT    16
/* The port ID to be placed on the RF FID field of the GRC bus. This field is
 * used both when GRC is the destination and when it is the source of the DMAE
 * transaction.
 */
#define DMAE_CMD_PORT_ID_MASK          0x3
#define DMAE_CMD_PORT_ID_SHIFT         18
/* Source PCI function number [3:0] */
#define DMAE_CMD_SRC_PF_ID_MASK        0xF
#define DMAE_CMD_SRC_PF_ID_SHIFT       20
/* Destination PCI function number [3:0] */
#define DMAE_CMD_DST_PF_ID_MASK        0xF
#define DMAE_CMD_DST_PF_ID_SHIFT       24
#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1 /* Source VFID valid */
#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1 /* Destination VFID valid */
#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
#define DMAE_CMD_RESERVED2_MASK        0x3
#define DMAE_CMD_RESERVED2_SHIFT       30
	/* PCIe source address low in bytes or GRC source address in DW */
	__le32 src_addr_lo;
	/* PCIe source address high in bytes or reserved (if source is GRC) */
	__le32 src_addr_hi;
	/* PCIe destination address low in bytes or GRC destination address in DW */
	__le32 dst_addr_lo;
	/* PCIe destination address high in bytes or reserved (if destination is
	 * GRC)
	 */
	__le32 dst_addr_hi;
	__le16 length_dw /* Length in DW */;
	__le16 opcode_b;
#define DMAE_CMD_SRC_VF_ID_MASK        0xFF /* Source VF id */
#define DMAE_CMD_SRC_VF_ID_SHIFT       0
#define DMAE_CMD_DST_VF_ID_MASK        0xFF /* Destination VF id */
#define DMAE_CMD_DST_VF_ID_SHIFT       8
	__le32 comp_addr_lo /* PCIe completion address low or grc address */;
	/* PCIe completion address high or reserved (if completion address is in
	 * GRC)
	 */
	__le32 comp_addr_hi;
	__le32 comp_val /* Value to write to completion address */;
	__le32 crc32 /* crc32 result */;
	__le32 crc_32_c /* crc32_c result */;
	__le16 crc16 /* crc16 result */;
	__le16 crc16_c /* crc16_c result */;
	__le16 crc10 /* crc_t10 result */;
	__le16 reserved;
	__le16 xsum16 /* checksum16 result */;
	__le16 xsum8 /* checksum8 result */;
};
1862 | ||
1863 | ||
/* Values for the DMAE_CMD_COMP_CRC_EN opcode field. */
enum dmae_cmd_comp_crc_en_enum {
	dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
	dmae_cmd_comp_crc_enabled /* Write a CRC word */,
	MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};
1869 | ||
1870 | ||
/* Values for the DMAE_CMD_COMP_FUNC opcode field. */
enum dmae_cmd_comp_func_enum {
	/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
	dmae_cmd_comp_func_to_src,
	/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
	dmae_cmd_comp_func_to_dst,
	MAX_DMAE_CMD_COMP_FUNC_ENUM
};
1878 | ||
1879 | ||
/* Values for the DMAE_CMD_COMP_WORD_EN opcode field. */
enum dmae_cmd_comp_word_en_enum {
	dmae_cmd_comp_word_disabled /* Do not write a completion word */,
	dmae_cmd_comp_word_enabled /* Write the completion word */,
	MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};
1885 | ||
1886 | ||
/* Values for the DMAE_CMD_C_DST (completion destination) opcode field. */
enum dmae_cmd_c_dst_enum {
	dmae_cmd_c_dst_pcie /* Completion is written to PCIe */,
	dmae_cmd_c_dst_grc /* Completion is written to GRC */,
	MAX_DMAE_CMD_C_DST_ENUM
};
1892 | ||
1893 | ||
/* Values for the DMAE_CMD_DST (DMA destination) opcode field. */
enum dmae_cmd_dst_enum {
	dmae_cmd_dst_none_0 /* No destination */,
	dmae_cmd_dst_pcie /* Destination is PCIe */,
	dmae_cmd_dst_grc /* Destination is GRC */,
	dmae_cmd_dst_none_3 /* No destination */,
	MAX_DMAE_CMD_DST_ENUM
};
1901 | ||
1902 | ||
/* Values for the DMAE_CMD_ERR_HANDLING opcode field. */
enum dmae_cmd_error_handling_enum {
	/* Send a regular completion (with no error indication) */
	dmae_cmd_error_handling_send_regular_comp,
	/* Send a completion with an error indication (i.e. set bit 31 of the
	 * completion word)
	 */
	dmae_cmd_error_handling_send_comp_with_err,
	dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
	MAX_DMAE_CMD_ERROR_HANDLING_ENUM
};
1913 | ||
1914 | ||
/* Values for the DMAE_CMD_SRC (DMA source) opcode field. */
enum dmae_cmd_src_enum {
	dmae_cmd_src_pcie /* The source is the PCIe */,
	dmae_cmd_src_grc /* The source is the GRC */,
	MAX_DMAE_CMD_SRC_ENUM
};
1920 | ||
1921 | ||
/*
 * IGU cleanup command
 */
struct igu_cleanup {
	__le32 sb_id_and_flags;
#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
#define IGU_CLEANUP_RESERVED0_SHIFT    0
/* cleanup clear - 0, set - 1 */
#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1
#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
/* must always be set (use enum command_type_bit) */
#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1
#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
	__le32 reserved1;
};
1939 | ||
1940 | ||
/*
 * IGU firmware driver command (either a producer/consumer update or a
 * cleanup command).
 */
union igu_command {
	struct igu_prod_cons_update prod_cons_update;
	struct igu_cleanup cleanup;
};
1948 | ||
1949 | ||
/*
 * IGU firmware driver command register control
 */
struct igu_command_reg_ctrl {
	__le16 opaque_fid;
	__le16 igu_command_reg_ctrl_fields;
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
/* command type: 0 - read, 1 - write */
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
1964 | ||
1965 | ||
/*
 * IGU mapping line structure
 */
struct igu_mapping_line {
	__le32 igu_mapping_line_fields;
#define IGU_MAPPING_LINE_VALID_MASK            0x1
#define IGU_MAPPING_LINE_VALID_SHIFT           0
#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1 /* PF-1, VF-0 */
#define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
#define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
#define IGU_MAPPING_LINE_RESERVED_SHIFT        24
};
1985 | ||
1986 | ||
/*
 * IGU MSIX line structure
 */
struct igu_msix_vector {
	struct regpair address /* MSI-X message address */;
	__le32 data /* MSI-X message data */;
	__le32 msix_vector_fields;
#define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
#define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
#define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
#define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
#define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
};
2003 | ||
2004 | ||
/* Mstorm core connection aggregation context. Field semantics (state bits,
 * CF timers/counters, rule enables) are defined by the firmware; the inline
 * comments below name the firmware-side meaning of each field.
 */
struct mstorm_core_conn_ag_ctx {
	u8 byte0 /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
	u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
	__le16 word0 /* word0 */;
	__le16 word1 /* word1 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
};
2041 | ||
2042 | ||
/*
 * per encapsulation type enabling flags
 */
struct prs_reg_encapsulation_type_en {
	u8 flags;
/* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
/* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
/* Enable bit for VXLAN encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
/* Enable bit for T-Tag encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
/* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
/* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
};
2069 | ||
2070 | ||
/* PCIe TLP Processing Hint (TPH) steering-tag hint values. */
enum pxp_tph_st_hint {
	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
	/* Device Write and Host Read, or Host Write and Device Read */
	TPH_ST_HINT_TARGET,
	/* Device Write and Host Read, or Host Write and Device Read - with
	 * temporal reuse
	 */
	TPH_ST_HINT_TARGET_PRIO,
	MAX_PXP_TPH_ST_HINT
};
2082 | ||
2083 | ||
/*
 * QM hardware structure of enable bypass credit mask
 */
struct qm_rf_bypass_mask {
	u8 flags;
#define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
#define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
#define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
#define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
#define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
#define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
#define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
#define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
#define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
};
2106 | ||
2107 | ||
/*
 * QM hardware structure of opportunistic credit mask
 */
struct qm_rf_opportunistic_mask {
	__le16 flags;
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK    0x1
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT   0
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK    0x1
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT   1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK      0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT     2
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK      0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT     3
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK       0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT      4
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK    0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT   5
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK    0x1
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT   6
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK  0x1
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK  0x7F
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
};
2134 | ||
2135 | ||
/*
 * QM hardware structure of QM map memory
 */
struct qm_rf_pq_map {
	__le32 reg;
#define QM_RF_PQ_MAP_PQ_VALID_MASK         0x1 /* PQ active */
#define QM_RF_PQ_MAP_PQ_VALID_SHIFT        0
#define QM_RF_PQ_MAP_RL_ID_MASK            0xFF /* RL ID */
#define QM_RF_PQ_MAP_RL_ID_SHIFT           1
/* the first PQ associated with the VPORT and VOQ of this PQ */
#define QM_RF_PQ_MAP_VP_PQ_ID_MASK         0x1FF
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT        9
#define QM_RF_PQ_MAP_VOQ_MASK              0x1F /* VOQ */
#define QM_RF_PQ_MAP_VOQ_SHIFT             18
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
#define QM_RF_PQ_MAP_RL_VALID_MASK         0x1 /* RL active */
#define QM_RF_PQ_MAP_RL_VALID_SHIFT        25
#define QM_RF_PQ_MAP_RESERVED_MASK         0x3F
#define QM_RF_PQ_MAP_RESERVED_SHIFT        26
};
2157 | ||
2158 | ||
/*
 * Completion params for aggregated interrupt completion
 */
struct sdm_agg_int_comp_params {
	__le16 params;
/* the number of aggregated interrupt, 0-31 */
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
/* 1 - set a bit in aggregated vector, 0 - do not set */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
/* Number of bit in the aggregated vector, 0-279 (TBD) */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
};
2174 | ||
2175 | ||
/*
 * SDM operation gen command (generate aggregative interrupt)
 */
struct sdm_op_gen {
	__le32 command;
/* completion parameters, bits 0-15 */
#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF
#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
#define SDM_OP_GEN_COMP_TYPE_MASK   0xF /* completion type, bits 16-19 */
#define SDM_OP_GEN_COMP_TYPE_SHIFT  16
#define SDM_OP_GEN_RESERVED_MASK    0xFFF /* reserved, bits 20-31 */
#define SDM_OP_GEN_RESERVED_SHIFT   20
};
2189 | ||
2190 | ||
2191 | ||
2192 | ||
2193 | ||
/* Ystorm core connection aggregation context. Field semantics (state bits,
 * CF timers/counters, rule enables) are defined by the firmware; the inline
 * comments below name the firmware-side meaning of each field.
 */
struct ystorm_core_conn_ag_ctx {
	u8 byte0 /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
	u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* word0 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le16 word1 /* word1 */;
	__le16 word2 /* word2 */;
	__le16 word3 /* word3 */;
	__le16 word4 /* word4 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
};
2237 | ||
2238 | #endif /* __ECORE_HSI_COMMON__ */ |