1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
3 * All rights reserved.
4 * www.cavium.com
5 */
6
7 #include "bcm_osal.h"
8 #include "ecore_hw.h"
9 #include "ecore_init_ops.h"
10 #include "reg_addr.h"
11 #include "ecore_rt_defs.h"
12 #include "ecore_hsi_common.h"
13 #include "ecore_hsi_init_func.h"
14 #include "ecore_hsi_eth.h"
15 #include "ecore_hsi_init_tool.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
18
19 #define CDU_VALIDATION_DEFAULT_CFG 61
20
21 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
22 { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
23 { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
24 { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
25 };
26 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
27 { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
28 };
29
30 /* General constants */
31 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
32 QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
33 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
34 0)
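/* Illustrative sizing example (hypothetical values, assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes): a PQ sized for 2048 CIDs needs
 *   QM_PQ_MEM_4KB(2048)   = DIV_ROUND_UP((2048 + 1) * 4, 0x1000) = 3
 * pages of 4KB PQ memory, and its size-select value is
 *   QM_PQ_SIZE_256B(2048) = DIV_ROUND_UP(2048, 0x100) - 1        = 7.
 */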
35 #define QM_INVALID_PQ_ID 0xffff
36
37 /* Feature enable */
38 #define QM_BYPASS_EN 1
39 #define QM_BYTE_CRD_EN 1
40
41 /* Other PQ constants */
42 #define QM_OTHER_PQS_PER_PF 4
43
44 /* VOQ constants */
45 #define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
46
47 /* WFQ constants: */
48
49 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
50 #define QM_WFQ_UPPER_BOUND 62500000
51
52 /* Bit of VOQ in WFQ VP PQ map */
53 #define QM_WFQ_VP_PQ_VOQ_SHIFT 0
54
55 /* Bit of PF in WFQ VP PQ map */
56 #define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
57 #define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
58
59 /* 0x9000 = 4*9*1024 */
60 #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
61
62 /* Max WFQ increment value is 0.7 * upper bound */
63 #define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
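/* Example of the WFQ weight range implied by the two macros above:
 * QM_WFQ_INC_VAL(1) = 0x9000 = 36864 and QM_WFQ_MAX_INC_VAL =
 * 62500000 * 7 / 10 = 43750000, so weights from 1 up to
 * 43750000 / 36864 = 1186 pass the "inc_val > QM_WFQ_MAX_INC_VAL"
 * checks performed in ecore_pf_wfq_rt_init() and ecore_init_pf_wfq().
 */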
64
65 /* Number of VOQs in E5 QmWfqCrd register */
66 #define QM_WFQ_CRD_E5_NUM_VOQS 16
67
68 /* RL constants: */
69
70 /* Period in us */
71 #define QM_RL_PERIOD 5
72
73 /* Period in 25MHz cycles */
74 #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
75
76 /* RL increment value - rate is specified in Mbps. The factor of 1.01 was
77 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
78 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
79 * although the credit increment value was the correct one and FW calculated
80 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
81 * this point.
82 */
83 #define QM_RL_INC_VAL(rate) \
84 OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
85 (8 * 100)), 1)
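/* Worked example for the RL increment (hypothetical rates):
 *   QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / 800  = 15781
 *   QM_RL_INC_VAL(0)     = (100000 * 5 * 101) / 800 = 63125
 * i.e. a zero rate falls back to the 100000 Mbps default, and the
 * result is the credit added every QM_RL_PERIOD (5 us), scaled by the
 * 1.01 compensation factor described above.
 */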
86
87 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
88 #define QM_PF_RL_UPPER_BOUND 62500000
89
90 /* Max PF RL increment value is 0.7 * upper bound */
91 #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
92
93 /* Vport RL Upper bound, link speed is in Mbps */
94 #define QM_VP_RL_UPPER_BOUND(speed) \
95 ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
96
97 /* Max Vport RL increment value is the Vport RL upper bound */
98 #define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
99
100 /* Vport RL credit threshold in case of QM bypass */
101 #define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
102
103 /* AFullOprtnstcCrdMask constants */
104 #define QM_OPPOR_LINE_VOQ_DEF 1
105 #define QM_OPPOR_FW_STOP_DEF 0
106 #define QM_OPPOR_PQ_EMPTY_DEF 1
107
108 /* Command Queue constants: */
109
110 /* Pure LB CmdQ lines (+spare) */
111 #define PBF_CMDQ_PURE_LB_LINES 150
112
113 #define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
114
115 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
116 (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
117 ext_voq * \
118 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
119 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
120
121 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
122 (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
123 ext_voq * \
124 (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
125 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
126
127 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
128 ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
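/* Example (hypothetical): for the pure LB VOQ, which gets
 * PBF_CMDQ_PURE_LB_LINES = 150 command queue lines, the initial line
 * credit is QM_VOQ_LINE_CRD(150) = (150 - 4) * 2 = 292, OR-ed with the
 * QM_LINE_CRD_REG_SIGN_BIT constant defined in the accompanying headers.
 */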
129
130 /* BTB: blocks constants (block size = 256B) */
131
132 /* 256B blocks in 9700B packet */
133 #define BTB_JUMBO_PKT_BLOCKS 38
134
135 /* Headroom per-port */
136 #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
137 #define BTB_PURE_LB_FACTOR 10
138
139 /* Factored (hence really 0.7) */
140 #define BTB_PURE_LB_RATIO 7
141
142 /* QM stop command constants */
143 #define QM_STOP_PQ_MASK_WIDTH 32
144 #define QM_STOP_CMD_ADDR 2
145 #define QM_STOP_CMD_STRUCT_SIZE 2
146 #define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
147 #define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
148 #define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
149 #define QM_STOP_CMD_GROUP_ID_OFFSET 1
150 #define QM_STOP_CMD_GROUP_ID_SHIFT 16
151 #define QM_STOP_CMD_GROUP_ID_MASK 15
152 #define QM_STOP_CMD_PQ_TYPE_OFFSET 1
153 #define QM_STOP_CMD_PQ_TYPE_SHIFT 24
154 #define QM_STOP_CMD_PQ_TYPE_MASK 1
155 #define QM_STOP_CMD_MAX_POLL_COUNT 100
156 #define QM_STOP_CMD_POLL_PERIOD_US 500
157
158 /* QM command macros */
159 #define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
160 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
161 SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
162
163 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
164 vp_pq_id, rl_id, ext_voq, wrr) \
165 do { \
166 OSAL_MEMSET(&map, 0, sizeof(map)); \
167 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
168 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
169 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
170 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
171 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
172 SET_FIELD(map.reg, \
173 QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
174 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
175 *((u32 *)&map)); \
176 } while (0)
177
178 #define WRITE_PQ_INFO_TO_RAM 1
179 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
180 (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
181 ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
182 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
183 (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
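/* Example of the packed PQ info word (hypothetical field values):
 *   PQ_INFO_ELEMENT(0x10, 2, 3, 1, 1, 5)
 *     = 0x10 | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (5 << 24)
 *     = 0x05532010
 * Each PQ gets one such 32-bit word at internal RAM offset
 * 21776 + pq_id * 4, as computed by PQ_INFO_RAM_GRC_ADDRESS().
 */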
184
185 /******************** INTERNAL IMPLEMENTATION *********************/
186
187 /* Returns the external VOQ number */
188 static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
189 u8 port_id,
190 u8 tc,
191 u8 max_phys_tcs_per_port)
192 {
193 if (tc == PURE_LB_TC)
194 return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
195 else
196 return port_id * (max_phys_tcs_per_port) + tc;
197 }
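/* Example (hypothetical, with max_phys_tcs_per_port = 4): port 1, TC 2
 * maps to external VOQ 1 * 4 + 2 = 6, while the pure LB TC of port 1
 * maps to VOQ NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1 (the exact value
 * depends on those chip constants).
 */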
198
199 /* Prepare PF RL enable/disable runtime init values */
200 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
201 {
202 STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
203 if (pf_rl_en) {
204 u8 num_ext_voqs = MAX_NUM_VOQS_E4;
205 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
206
207 /* Enable RLs for all VOQs */
208 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
209 (u32)voq_bit_mask);
210 #ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
211 if (num_ext_voqs >= 32)
212 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
213 (u32)(voq_bit_mask >> 32));
214 #endif
215
216 /* Write RL period */
217 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
218 QM_RL_PERIOD_CLK_25M);
219 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
220 QM_RL_PERIOD_CLK_25M);
221
222 /* Set credit threshold for QM bypass flow */
223 if (QM_BYPASS_EN)
224 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
225 QM_PF_RL_UPPER_BOUND);
226 }
227 }
228
229 /* Prepare PF WFQ enable/disable runtime init values */
230 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
231 {
232 STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
233
234 /* Set credit threshold for QM bypass flow */
235 if (pf_wfq_en && QM_BYPASS_EN)
236 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
237 QM_WFQ_UPPER_BOUND);
238 }
239
240 /* Prepare VPORT RL enable/disable runtime init values */
241 static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
242 {
243 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
244 vport_rl_en ? 1 : 0);
245 if (vport_rl_en) {
246 /* Write RL period (use timer 0 only) */
247 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
248 QM_RL_PERIOD_CLK_25M);
249 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
250 QM_RL_PERIOD_CLK_25M);
251
252 /* Set credit threshold for QM bypass flow */
253 if (QM_BYPASS_EN)
254 STORE_RT_REG(p_hwfn,
255 QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
256 QM_VP_RL_BYPASS_THRESH_SPEED);
257 }
258 }
259
260 /* Prepare VPORT WFQ enable/disable runtime init values */
261 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
262 {
263 STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
264 vport_wfq_en ? 1 : 0);
265
266 /* Set credit threshold for QM bypass flow */
267 if (vport_wfq_en && QM_BYPASS_EN)
268 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
269 QM_WFQ_UPPER_BOUND);
270 }
271
272 /* Prepare runtime init values to allocate PBF command queue lines for
273 * the specified VOQ
274 */
275 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
276 u8 ext_voq,
277 u16 cmdq_lines)
278 {
279 u32 qm_line_crd;
280
281 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
282
283 OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
284 (u32)cmdq_lines);
285 STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
286 qm_line_crd);
287 STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
288 qm_line_crd);
289 }
290
291 /* Prepare runtime init values to allocate PBF command queue lines. */
292 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
293 u8 max_ports_per_engine,
294 u8 max_phys_tcs_per_port,
295 struct init_qm_port_params
296 port_params[MAX_NUM_PORTS])
297 {
298 u8 tc, ext_voq, port_id, num_tcs_in_port;
299 u8 num_ext_voqs = MAX_NUM_VOQS_E4;
300
301 /* Clear PBF lines of all VOQs */
302 for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
303 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
304
305 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
306 u16 phys_lines, phys_lines_per_tc;
307
308 if (!port_params[port_id].active)
309 continue;
310
311 /* Find number of command queue lines to divide between the
312 * active physical TCs. In E5, 1/8 of the lines are reserved.
313 * The lines for the pure LB TC are subtracted.
314 */
315 phys_lines = port_params[port_id].num_pbf_cmd_lines;
316 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
317
318 /* Find #lines per active physical TC */
319 num_tcs_in_port = 0;
320 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
321 if (((port_params[port_id].active_phys_tcs >> tc) &
322 0x1) == 1)
323 num_tcs_in_port++;
324 phys_lines_per_tc = phys_lines / num_tcs_in_port;
325
326 /* Init registers per active TC */
327 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
328 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
329 max_phys_tcs_per_port);
330 if (((port_params[port_id].active_phys_tcs >> tc) &
331 0x1) == 1)
332 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
333 phys_lines_per_tc);
334 }
335
336 /* Init registers for pure LB TC */
337 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
338 max_phys_tcs_per_port);
339 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
340 PBF_CMDQ_PURE_LB_LINES);
341 }
342 }
343
344 /*
345 * Prepare runtime init values to allocate guaranteed BTB blocks for the
346 * specified port. The guaranteed BTB space is divided between the TCs as
347 * follows (shared space is currently not used):
348 * 1. Parameters:
349 *    B - BTB blocks for this port
350 *    C - Number of physical TCs for this port
351 * 2. Calculation:
352 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
353 *       headroom.
354 *    b. B = B - 38 (remainder after global headroom allocation).
355 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
356 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
357 *    e. B/C blocks are allocated for each physical TC.
358 * Assumptions:
359 * - MTU is up to 9700 bytes (38 blocks)
360 * - All TCs are considered symmetrical (same rate and packet size)
361 * - No optimization for lossy TC (all are considered lossless). Shared space is
362 * not enabled and allocated for each TC.
363 */
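/* Worked example of the calculation above (hypothetical port): with
 * B = 1000 guaranteed blocks and C = 4 active physical TCs:
 *   usable       = 1000 - 38                     = 962
 *   pure LB VOQ  = MAX(38, (962 * 10 / 47) / 10) = MAX(38, 20) = 38
 *   per phys. TC = (962 - 38) / 4                = 231
 * matching the integer arithmetic performed by the function below.
 */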
364 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
365 u8 max_ports_per_engine,
366 u8 max_phys_tcs_per_port,
367 struct init_qm_port_params
368 port_params[MAX_NUM_PORTS])
369 {
370 u32 usable_blocks, pure_lb_blocks, phys_blocks;
371 u8 tc, ext_voq, port_id, num_tcs_in_port;
372
373 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
374 if (!port_params[port_id].active)
375 continue;
376
377 /* Subtract headroom blocks */
378 usable_blocks = port_params[port_id].num_btb_blocks -
379 BTB_HEADROOM_BLOCKS;
380
381 /* Find blocks per physical TC. Use a factor to avoid floating-point
382 * arithmetic.
383 */
384 num_tcs_in_port = 0;
385 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
386 if (((port_params[port_id].active_phys_tcs >> tc) &
387 0x1) == 1)
388 num_tcs_in_port++;
389
390 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
391 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
392 BTB_PURE_LB_RATIO);
393 pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
394 pure_lb_blocks /
395 BTB_PURE_LB_FACTOR);
396 phys_blocks = (usable_blocks - pure_lb_blocks) /
397 num_tcs_in_port;
398
399 /* Init physical TCs */
400 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
401 if (((port_params[port_id].active_phys_tcs >> tc) &
402 0x1) == 1) {
403 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
404 max_phys_tcs_per_port);
405 STORE_RT_REG(p_hwfn,
406 PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
407 phys_blocks);
408 }
409 }
410
411 /* Init pure LB TC */
412 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
413 max_phys_tcs_per_port);
414 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
415 pure_lb_blocks);
416 }
417 }
418
419 /* Prepare Tx PQ mapping runtime init values for the specified PF */
420 static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
421 struct ecore_ptt *p_ptt,
422 u8 pf_id,
423 u8 max_phys_tcs_per_port,
424 bool is_pf_loading,
425 u32 num_pf_cids,
426 u32 num_vf_cids,
427 u16 start_pq,
428 u16 num_pf_pqs,
429 u16 num_vf_pqs,
430 u8 start_vport,
431 u32 base_mem_addr_4kb,
432 struct init_qm_pq_params *pq_params,
433 struct init_qm_vport_params *vport_params)
434 {
435 /* A bit per Tx PQ indicating if the PQ is associated with a VF */
436 u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
437 u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
438 u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
439 u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
440
441 num_pqs = num_pf_pqs + num_vf_pqs;
442
443 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
444 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
445
446 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
447 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
448 mem_addr_4kb = base_mem_addr_4kb;
449
450 /* Set mapping from PQ group to PF */
451 for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
452 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
453 (u32)(pf_id));
454
455 /* Set PQ sizes */
456 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
457 QM_PQ_SIZE_256B(num_pf_cids));
458 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
459 QM_PQ_SIZE_256B(num_vf_cids));
460
461 /* Go over all Tx PQs */
462 for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
463 u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
464 u8 ext_voq, vport_id_in_pf;
465 bool is_vf_pq, rl_valid;
466 u16 first_tx_pq_id;
467
468 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
469 pq_params[i].tc_id,
470 max_phys_tcs_per_port);
471 is_vf_pq = (i >= num_pf_pqs);
472 rl_valid = pq_params[i].rl_valid > 0;
473
474 /* Update first Tx PQ of VPORT/TC */
475 vport_id_in_pf = pq_params[i].vport_id - start_vport;
476 first_tx_pq_id =
477 vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
478 if (first_tx_pq_id == QM_INVALID_PQ_ID) {
479 u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
480 (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
481
482 /* Create new VP PQ */
483 vport_params[vport_id_in_pf].
484 first_tx_pq_id[pq_params[i].tc_id] = pq_id;
485 first_tx_pq_id = pq_id;
486
487 /* Map VP PQ to VOQ and PF */
488 STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
489 first_tx_pq_id, map_val);
490 }
491
492 /* Check RL ID */
493 if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
494 DP_NOTICE(p_hwfn, true,
495 "Invalid VPORT ID for rate limiter config\n");
496 rl_valid = false;
497 }
498
499 /* Prepare PQ map entry */
500 struct qm_rf_pq_map_e4 tx_pq_map;
501
502 QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
503 1 : 0,
504 first_tx_pq_id, rl_valid ?
505 pq_params[i].vport_id : 0,
506 ext_voq, pq_params[i].wrr_group);
507
508 /* Set PQ base address */
509 STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
510 mem_addr_4kb);
511
512 /* Clear PQ pointer table entry (64 bit) */
513 if (is_pf_loading)
514 for (j = 0; j < 2; j++)
515 STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
516 (pq_id * 2) + j, 0);
517
518 /* Write PQ info to RAM */
519 if (WRITE_PQ_INFO_TO_RAM != 0) {
520 u32 pq_info = 0;
521
522 pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
523 pq_params[i].tc_id,
524 pq_params[i].port_id,
525 rl_valid ? 1 : 0, rl_valid ?
526 pq_params[i].vport_id : 0);
527 ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
528 pq_info);
529 }
530
531 /* If VF PQ, add indication to PQ VF mask */
532 if (is_vf_pq) {
533 tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
534 (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
535 mem_addr_4kb += vport_pq_mem_4kb;
536 } else {
537 mem_addr_4kb += pq_mem_4kb;
538 }
539 }
540
541 /* Store Tx PQ VF mask to size select register */
542 for (i = 0; i < num_tx_pq_vf_masks; i++)
543 if (tx_pq_vf_mask[i])
544 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
545 i, tx_pq_vf_mask[i]);
546 }
547
548 /* Prepare Other PQ mapping runtime init values for the specified PF */
549 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
550 u8 pf_id,
551 bool is_pf_loading,
552 u32 num_pf_cids,
553 u32 num_tids,
554 u32 base_mem_addr_4kb)
555 {
556 u32 pq_size, pq_mem_4kb, mem_addr_4kb;
557 u16 i, j, pq_id, pq_group;
558
559 /* A single other PQ group is used in each PF, where PQ group i is used
560 * in PF i.
561 */
562 pq_group = pf_id;
563 pq_size = num_pf_cids + num_tids;
564 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
565 mem_addr_4kb = base_mem_addr_4kb;
566
567 /* Map PQ group to PF */
568 STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
569 (u32)(pf_id));
570
571 /* Set PQ sizes */
572 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
573 QM_PQ_SIZE_256B(pq_size));
574
575 for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
576 i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
577 /* Set PQ base address */
578 STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
579 mem_addr_4kb);
580
581 /* Clear PQ pointer table entry */
582 if (is_pf_loading)
583 for (j = 0; j < 2; j++)
584 STORE_RT_REG(p_hwfn,
585 QM_REG_PTRTBLOTHER_RT_OFFSET +
586 (pq_id * 2) + j, 0);
587
588 mem_addr_4kb += pq_mem_4kb;
589 }
590 }
591
592 /* Prepare PF WFQ runtime init values for the specified PF.
593 * Return -1 on error.
594 */
595 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
596 u8 pf_id,
597 u16 pf_wfq,
598 u8 max_phys_tcs_per_port,
599 u16 num_tx_pqs,
600 struct init_qm_pq_params *pq_params)
601 {
602 u32 inc_val, crd_reg_offset;
603 u8 ext_voq;
604 u16 i;
605
606 inc_val = QM_WFQ_INC_VAL(pf_wfq);
607 if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
608 DP_NOTICE(p_hwfn, true,
609 "Invalid PF WFQ weight configuration\n");
610 return -1;
611 }
612
613 for (i = 0; i < num_tx_pqs; i++) {
614 ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
615 pq_params[i].tc_id,
616 max_phys_tcs_per_port);
617 crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
618 QM_REG_WFQPFCRD_RT_OFFSET :
619 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
620 ext_voq * MAX_NUM_PFS_BB +
621 (pf_id % MAX_NUM_PFS_BB);
622 OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
623 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
624 }
625
626 STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
627 pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
628 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
629
630 return 0;
631 }
632
633 /* Prepare PF RL runtime init values for the specified PF.
634 * Return -1 on error.
635 */
636 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
637 {
638 u32 inc_val;
639
640 inc_val = QM_RL_INC_VAL(pf_rl);
641 if (inc_val > QM_PF_RL_MAX_INC_VAL) {
642 DP_NOTICE(p_hwfn, true,
643 "Invalid PF rate limit configuration\n");
644 return -1;
645 }
646
647 STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
648 (u32)QM_RL_CRD_REG_SIGN_BIT);
649 STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
650 QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
651 STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
652
653 return 0;
654 }
655
656 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
657 * Return -1 on error.
658 */
659 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
660 u8 num_vports,
661 struct init_qm_vport_params *vport_params)
662 {
663 u16 vport_pq_id;
664 u32 inc_val;
665 u8 tc, i;
666
667 /* Go over all PF VPORTs */
668 for (i = 0; i < num_vports; i++) {
669 if (!vport_params[i].vport_wfq)
670 continue;
671
672 inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
673 if (inc_val > QM_WFQ_MAX_INC_VAL) {
674 DP_NOTICE(p_hwfn, true,
675 "Invalid VPORT WFQ weight configuration\n");
676 return -1;
677 }
678
679 /* Each VPORT can have several VPORT PQ IDs for various TCs */
680 for (tc = 0; tc < NUM_OF_TCS; tc++) {
681 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
682 if (vport_pq_id != QM_INVALID_PQ_ID) {
683 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
684 vport_pq_id,
685 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
686 STORE_RT_REG(p_hwfn,
687 QM_REG_WFQVPWEIGHT_RT_OFFSET +
688 vport_pq_id, inc_val);
689 }
690 }
691 }
692 return 0;
693 }
694
695 /* Prepare VPORT RL runtime init values for the specified VPORTs.
696 * Return -1 on error.
697 */
698 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
699 u8 start_vport,
700 u8 num_vports,
701 u32 link_speed,
702 struct init_qm_vport_params *vport_params)
703 {
704 u8 i, vport_id;
705 u32 inc_val;
706
707 if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
708 DP_NOTICE(p_hwfn, true,
709 "Invalid VPORT ID for rate limiter configuration\n");
710 return -1;
711 }
712
713 /* Go over all PF VPORTs */
714 for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
715 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
716 vport_params[i].vport_rl : link_speed);
717 if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
718 DP_NOTICE(p_hwfn, true,
719 "Invalid VPORT rate-limit configuration\n");
720 return -1;
721 }
722
723 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
724 (u32)QM_RL_CRD_REG_SIGN_BIT);
725 STORE_RT_REG(p_hwfn,
726 QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
727 QM_VP_RL_UPPER_BOUND(link_speed) |
728 (u32)QM_RL_CRD_REG_SIGN_BIT);
729 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
730 inc_val);
731 }
732
733 return 0;
734 }
735
736 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
737 struct ecore_ptt *p_ptt)
738 {
739 u32 reg_val, i;
740
741 for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
742 i++) {
743 OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
744 reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
745 }
746
747 /* Check if timeout while waiting for SDM command ready */
748 if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
749 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
750 "Timeout waiting for QM SDM cmd ready signal\n");
751 return false;
752 }
753
754 return true;
755 }
756
757 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
758 struct ecore_ptt *p_ptt,
759 u32 cmd_addr,
760 u32 cmd_data_lsb,
761 u32 cmd_data_msb)
762 {
763 if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
764 return false;
765
766 ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
767 ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
768 ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
769 ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
770 ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
771
772 return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
773 }
774
775
776 /******************** INTERFACE IMPLEMENTATION *********************/
777
778 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
779 u32 num_vf_cids,
780 u32 num_tids,
781 u16 num_pf_pqs,
782 u16 num_vf_pqs)
783 {
784 return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
785 QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
786 QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
787 }
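/* Example (hypothetical parameters, assuming QM_PQ_ELEMENT_SIZE is 4
 * bytes): for num_pf_cids = 2048, num_vf_cids = 1024, num_tids = 4096,
 * num_pf_pqs = 8 and num_vf_pqs = 16:
 *   QM_PQ_MEM_4KB(2048) = 3, QM_PQ_MEM_4KB(1024) = 2,
 *   QM_PQ_MEM_4KB(2048 + 4096) = 7
 * so the PF needs 3 * 8 + 2 * 16 + 7 * 4 = 84 pages of 4KB (336KB).
 */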
788
789 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
790 u8 max_ports_per_engine,
791 u8 max_phys_tcs_per_port,
792 bool pf_rl_en,
793 bool pf_wfq_en,
794 bool vport_rl_en,
795 bool vport_wfq_en,
796 struct init_qm_port_params
797 port_params[MAX_NUM_PORTS])
798 {
799 u32 mask;
800
801 /* Init AFullOprtnstcCrdMask */
802 mask = (QM_OPPOR_LINE_VOQ_DEF <<
803 QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
804 (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
805 (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
806 (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
807 (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
808 (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
809 (QM_OPPOR_FW_STOP_DEF <<
810 QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
811 (QM_OPPOR_PQ_EMPTY_DEF <<
812 QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
813 STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
814
815 /* Enable/disable PF RL */
816 ecore_enable_pf_rl(p_hwfn, pf_rl_en);
817
818 /* Enable/disable PF WFQ */
819 ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
820
821 /* Enable/disable VPORT RL */
822 ecore_enable_vport_rl(p_hwfn, vport_rl_en);
823
824 /* Enable/disable VPORT WFQ */
825 ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
826
827 /* Init PBF CMDQ line credit */
828 ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
829 max_phys_tcs_per_port, port_params);
830
831 /* Init BTB blocks in PBF */
832 ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
833 max_phys_tcs_per_port, port_params);
834
835 return 0;
836 }
837
838 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
839 struct ecore_ptt *p_ptt,
840 u8 pf_id,
841 u8 max_phys_tcs_per_port,
842 bool is_pf_loading,
843 u32 num_pf_cids,
844 u32 num_vf_cids,
845 u32 num_tids,
846 u16 start_pq,
847 u16 num_pf_pqs,
848 u16 num_vf_pqs,
849 u8 start_vport,
850 u8 num_vports,
851 u16 pf_wfq,
852 u32 pf_rl,
853 u32 link_speed,
854 struct init_qm_pq_params *pq_params,
855 struct init_qm_vport_params *vport_params)
856 {
857 u32 other_mem_size_4kb;
858 u8 tc, i;
859
860 other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
861 QM_OTHER_PQS_PER_PF;
862
863 /* Clear first Tx PQ ID array for each VPORT */
864 for (i = 0; i < num_vports; i++)
865 for (tc = 0; tc < NUM_OF_TCS; tc++)
866 vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
867
868 /* Map Other PQs (if any) */
869 #if QM_OTHER_PQS_PER_PF > 0
870 ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
871 num_tids, 0);
872 #endif
873
874 /* Map Tx PQs */
875 ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
876 is_pf_loading, num_pf_cids, num_vf_cids,
877 start_pq, num_pf_pqs, num_vf_pqs, start_vport,
878 other_mem_size_4kb, pq_params, vport_params);
879
880 /* Init PF WFQ */
881 if (pf_wfq)
882 if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
883 max_phys_tcs_per_port,
884 num_pf_pqs + num_vf_pqs, pq_params))
885 return -1;
886
887 /* Init PF RL */
888 if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
889 return -1;
890
891 /* Set VPORT WFQ */
892 if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
893 return -1;
894
895 /* Set VPORT RL */
896 if (ecore_vport_rl_rt_init
897 (p_hwfn, start_vport, num_vports, link_speed, vport_params))
898 return -1;
899
900 return 0;
901 }
902
903 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
904 struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
905 {
906 u32 inc_val;
907
908 inc_val = QM_WFQ_INC_VAL(pf_wfq);
909 if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
910 DP_NOTICE(p_hwfn, true,
911 "Invalid PF WFQ weight configuration\n");
912 return -1;
913 }
914
915 ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
916
917 return 0;
918 }
919
920 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
921 struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
922 {
923 u32 inc_val;
924
925 inc_val = QM_RL_INC_VAL(pf_rl);
926 if (inc_val > QM_PF_RL_MAX_INC_VAL) {
927 DP_NOTICE(p_hwfn, true,
928 "Invalid PF rate limit configuration\n");
929 return -1;
930 }
931
932 ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
933 (u32)QM_RL_CRD_REG_SIGN_BIT);
934 ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
935
936 return 0;
937 }
938
939 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
940 struct ecore_ptt *p_ptt,
941 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
942 {
943 u16 vport_pq_id;
944 u32 inc_val;
945 u8 tc;
946
947 inc_val = QM_WFQ_INC_VAL(vport_wfq);
948 if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
949 DP_NOTICE(p_hwfn, true,
950 "Invalid VPORT WFQ weight configuration\n");
951 return -1;
952 }
953
954 for (tc = 0; tc < NUM_OF_TCS; tc++) {
955 vport_pq_id = first_tx_pq_id[tc];
956 if (vport_pq_id != QM_INVALID_PQ_ID) {
957 ecore_wr(p_hwfn, p_ptt,
958 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
959 }
960 }
961
962 return 0;
963 }
964
965 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
966 struct ecore_ptt *p_ptt, u8 vport_id,
967 u32 vport_rl,
968 u32 link_speed)
969 {
970 u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
971
972 if (vport_id >= max_qm_global_rls) {
973 DP_NOTICE(p_hwfn, true,
974 "Invalid VPORT ID for rate limiter configuration\n");
975 return -1;
976 }
977
978 inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
979 if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
980 DP_NOTICE(p_hwfn, true,
981 "Invalid VPORT rate-limit configuration\n");
982 return -1;
983 }
984
985 ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
986 (u32)QM_RL_CRD_REG_SIGN_BIT);
987 ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
988
989 return 0;
990 }
991
992 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
993 struct ecore_ptt *p_ptt,
994 bool is_release_cmd,
995 bool is_tx_pq, u16 start_pq, u16 num_pqs)
996 {
997 u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
998 u32 pq_mask = 0, last_pq, pq_id;
999
1000 last_pq = start_pq + num_pqs - 1;
1001
1002 /* Set command's PQ type */
1003 QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
1004
1005 /* Go over requested PQs */
1006 for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
1007 /* Set PQ bit in mask (stop command only) */
1008 if (!is_release_cmd)
1009 pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
1010
1011 /* If last PQ or end of PQ mask, write command */
1012 if ((pq_id == last_pq) ||
1013 (pq_id % QM_STOP_PQ_MASK_WIDTH ==
1014 (QM_STOP_PQ_MASK_WIDTH - 1))) {
1015 QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
1016 pq_mask);
1017 QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
1018 pq_id / QM_STOP_PQ_MASK_WIDTH);
1019 if (!ecore_send_qm_cmd
1020 (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
1021 cmd_arr[1]))
1022 return false;
1023 pq_mask = 0;
1024 }
1025 }
1026
1027 return true;
1028 }
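/* Example of how the loop above chunks a PQ range into 32-bit masks
 * (hypothetical range): for start_pq = 30 and num_pqs = 4, PQs 30-31
 * set bits 30-31 and are flushed as one command with GROUP_ID = 0
 * (when pq_id % 32 == 31), then PQs 32-33 set bits 0-1 and are flushed
 * as a second command with GROUP_ID = 1 when pq_id == last_pq.
 */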
1029
1030
1031 /* NIG: ETS configuration constants */
1032 #define NIG_TX_ETS_CLIENT_OFFSET 4
1033 #define NIG_LB_ETS_CLIENT_OFFSET 1
1034 #define NIG_ETS_MIN_WFQ_BYTES 1600
1035
1036 /* NIG: ETS constants */
1037 #define NIG_ETS_UP_BOUND(weight, mtu) \
1038 (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1039
1040 /* NIG: RL constants */
1041
1042 /* Byte base type value */
1043 #define NIG_RL_BASE_TYPE 1
1044
1045 /* Period in us */
1046 #define NIG_RL_PERIOD 1
1047
1048 /* Period in 25MHz cycles */
1049 #define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
1050
1051 /* Rate in mbps */
1052 #define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
1053
1054 #define NIG_RL_MAX_VAL(inc_val, mtu) \
1055 (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
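/* Example (hypothetical rate and MTU): NIG_RL_INC_VAL(10000) =
 * 10000 * 1 / 8 = 1250 bytes of credit per 1 us period, and
 * NIG_RL_MAX_VAL(1250, 9600) = 2 * MAX(1250, 9600) = 19200.
 */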
1056
1057 /* NIG: packet priority configuration constants */
1058 #define NIG_PRIORITY_MAP_TC_BITS 4
1059
1060
1061 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
1062 struct ecore_ptt *p_ptt,
1063 struct init_ets_req *req, bool is_lb)
1064 {
1065 u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
1066 u32 tc_bound_base_addr, tc_bound_addr_diff;
1067 u8 sp_tc_map = 0, wfq_tc_map = 0;
1068 u8 tc, num_tc, tc_client_offset;
1069
1070 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
1071 tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
1072 NIG_TX_ETS_CLIENT_OFFSET;
1073 min_weight = 0xffffffff;
1074 tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1075 NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1076 tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
1077 NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1078 NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
1079 NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1080 tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1081 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1082 tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
1083 NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1084 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
1085 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1086
1087 for (tc = 0; tc < num_tc; tc++) {
1088 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1089
1090 /* Update SP map */
1091 if (tc_req->use_sp)
1092 sp_tc_map |= (1 << tc);
1093
1094 if (!tc_req->use_wfq)
1095 continue;
1096
1097 /* Update WFQ map */
1098 wfq_tc_map |= (1 << tc);
1099
1100 /* Find minimal weight */
1101 if (tc_req->weight < min_weight)
1102 min_weight = tc_req->weight;
1103 }
1104
1105 /* Write SP map */
1106 ecore_wr(p_hwfn, p_ptt,
1107 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
1108 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
1109 (sp_tc_map << tc_client_offset));
1110
1111 /* Write WFQ map */
1112 ecore_wr(p_hwfn, p_ptt,
1113 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
1114 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
1115 (wfq_tc_map << tc_client_offset));
1116 /* write WFQ weights */
1117 for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
1118 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1119 u32 byte_weight;
1120
1121 if (!tc_req->use_wfq)
1122 continue;
1123
1124 /* Translate weight to bytes */
1125 byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1126 min_weight;
1127
1128 /* Write WFQ weight */
1129 ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
1130 tc_weight_addr_diff * tc_client_offset, byte_weight);
1131
1132 /* Write WFQ upper bound */
1133 ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
1134 tc_bound_addr_diff * tc_client_offset,
1135 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
1136 }
1137 }
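/* Example of the WFQ weight translation done above (hypothetical
 * weights): with two WFQ TCs of weights 2 and 5, min_weight = 2, so the
 * programmed byte weights are 1600 * 2 / 2 = 1600 and 1600 * 5 / 2 =
 * 4000; with an MTU of 9600 the upper bound for both is
 * NIG_ETS_UP_BOUND(byte_weight, 9600) = 2 * 9600 = 19200.
 */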
1138
1139 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1140 struct ecore_ptt *p_ptt,
1141 struct init_nig_lb_rl_req *req)
1142 {
1143 u32 ctrl, inc_val, reg_offset;
1144 u8 tc;
1145
1146 /* Disable global MAC+LB RL */
1147 ctrl =
1148 NIG_RL_BASE_TYPE <<
1149 NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1150 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1151
1152 /* Configure and enable global MAC+LB RL */
1153 if (req->lb_mac_rate) {
1154 /* Configure */
1155 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
1156 NIG_RL_PERIOD_CLK_25M);
1157 inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1158 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
1159 inc_val);
1160 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
1161 NIG_RL_MAX_VAL(inc_val, req->mtu));
1162
1163 /* Enable */
1164 ctrl |=
1165 1 <<
1166 NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1167 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1168 }
1169
1170 /* Disable global LB-only RL */
1171 ctrl =
1172 NIG_RL_BASE_TYPE <<
1173 NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1174 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1175
1176 /* Configure and enable global LB-only RL */
1177 if (req->lb_rate) {
1178 /* Configure */
1179 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
1180 NIG_RL_PERIOD_CLK_25M);
1181 inc_val = NIG_RL_INC_VAL(req->lb_rate);
1182 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
1183 inc_val);
1184 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
1185 NIG_RL_MAX_VAL(inc_val, req->mtu));
1186
1187 /* Enable */
1188 ctrl |=
1189 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1190 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1191 }
1192
1193 /* Per-TC RLs */
1194 for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
1195 tc++, reg_offset += 4) {
1196 /* Disable TC RL */
1197 ctrl =
1198 NIG_RL_BASE_TYPE <<
1199 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1200 ecore_wr(p_hwfn, p_ptt,
1201 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1202
1203 /* Configure and enable TC RL */
1204 if (!req->tc_rate[tc])
1205 continue;
1206
1207 /* Configure */
1208 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
1209 reg_offset, NIG_RL_PERIOD_CLK_25M);
1210 inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1211 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
1212 reg_offset, inc_val);
1213 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
1214 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1215
1216 /* Enable */
1217 ctrl |= 1 <<
1218 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1219 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
1220 reg_offset, ctrl);
1221 }
1222 }
1223
1224 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1225 struct ecore_ptt *p_ptt,
1226 struct init_nig_pri_tc_map_req *req)
1227 {
1228 u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1229 u32 pri_tc_mask = 0;
1230 u8 pri, tc;
1231
1232 for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1233 if (!req->pri[pri].valid)
1234 continue;
1235
1236 pri_tc_mask |= (req->pri[pri].tc_id <<
1237 (pri * NIG_PRIORITY_MAP_TC_BITS));
1238 tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1239 }
1240
1241 /* Write priority -> TC mask */
1242 ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1243
1244 /* Write TC -> priority mask */
1245 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1246 ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
1247 tc_pri_mask[tc]);
1248 ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
1249 tc_pri_mask[tc]);
1250 }
1251 }
1252
1253
1254 /* PRS: ETS configuration constants */
1255 #define PRS_ETS_MIN_WFQ_BYTES 1600
1256 #define PRS_ETS_UP_BOUND(weight, mtu) \
1257 (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1258
1259
1260 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1261 struct ecore_ptt *p_ptt, struct init_ets_req *req)
1262 {
1263 u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1264 u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1265
1266 tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
1267 PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1268 tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
1269 PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1270
1271 for (tc = 0; tc < NUM_OF_TCS; tc++) {
1272 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1273
1274 /* Update SP map */
1275 if (tc_req->use_sp)
1276 sp_tc_map |= (1 << tc);
1277
1278 if (!tc_req->use_wfq)
1279 continue;
1280
1281 /* Update WFQ map */
1282 wfq_tc_map |= (1 << tc);
1283
1284 /* Find minimal weight */
1285 if (tc_req->weight < min_weight)
1286 min_weight = tc_req->weight;
1287 }
1288
1289 /* write SP map */
1290 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1291
1292 /* write WFQ map */
1293 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
1294 wfq_tc_map);
1295
1296 /* write WFQ weights */
1297 for (tc = 0; tc < NUM_OF_TCS; tc++) {
1298 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1299 u32 byte_weight;
1300
1301 if (!tc_req->use_wfq)
1302 continue;
1303
1304 /* Translate weight to bytes */
1305 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1306 min_weight;
1307
1308 /* Write WFQ weight */
1309 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
1310 tc_weight_addr_diff, byte_weight);
1311
1312 /* Write WFQ upper bound */
1313 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
1314 tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
1315 req->mtu));
1316 }
1317 }
1318
1319
1320 /* BRB: RAM configuration constants */
1321 #define BRB_TOTAL_RAM_BLOCKS_BB 4800
1322 #define BRB_TOTAL_RAM_BLOCKS_K2 5632
1323 #define BRB_BLOCK_SIZE 128
1324 #define BRB_MIN_BLOCKS_PER_TC 9
1325 #define BRB_HYST_BYTES 10240
1326 #define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
1327
1328 /* Temporary big RAM allocation - should be updated */
1329 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1330 struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
1331 {
1332 u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1333 u32 active_port_blocks, reg_offset = 0;
1334 u8 port, active_ports = 0;
1335
1336 tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
1337 BRB_BLOCK_SIZE);
1338 min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
1339 BRB_BLOCK_SIZE);
1340 total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
1341 BRB_TOTAL_RAM_BLOCKS_BB;
1342
1343 /* Find number of active ports */
1344 for (port = 0; port < MAX_NUM_PORTS; port++)
1345 if (req->num_active_tcs[port])
1346 active_ports++;
1347
1348 active_port_blocks = (u32)(total_blocks / active_ports);
1349
1350 for (port = 0; port < req->max_ports_per_engine; port++) {
1351 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1352 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1353 u32 tc_guaranteed_blocks;
1354 u8 tc;
1355
1356 /* Calculate per-port sizes */
1357 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
1358 BRB_BLOCK_SIZE);
1359 port_blocks = req->num_active_tcs[port] ? active_port_blocks :
1360 0;
1361 port_guaranteed_blocks = req->num_active_tcs[port] *
1362 tc_guaranteed_blocks;
1363 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1364 full_xoff_th = req->num_active_tcs[port] *
1365 BRB_MIN_BLOCKS_PER_TC;
1366 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1367 pause_xoff_th = tc_headroom_blocks;
1368 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1369
1370 /* Init total size per port */
1371 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
1372 port_blocks);
1373
1374 /* Init shared size per port */
1375 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
1376 port_shared_blocks);
1377
1378 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1379 /* Clear init values for non-active TCs */
1380 if (tc == req->num_active_tcs[port]) {
1381 tc_guaranteed_blocks = 0;
1382 full_xoff_th = 0;
1383 full_xon_th = 0;
1384 pause_xoff_th = 0;
1385 pause_xon_th = 0;
1386 }
1387
1388 /* Init guaranteed size per TC */
1389 ecore_wr(p_hwfn, p_ptt,
1390 BRB_REG_TC_GUARANTIED_0 + reg_offset,
1391 tc_guaranteed_blocks);
1392 ecore_wr(p_hwfn, p_ptt,
1393 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
1394 BRB_HYST_BLOCKS);
1395
1396 /* Init pause/full thresholds per physical TC - for
1397 * loopback traffic.
1398 */
1399 ecore_wr(p_hwfn, p_ptt,
1400 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
1401 reg_offset, full_xoff_th);
1402 ecore_wr(p_hwfn, p_ptt,
1403 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
1404 reg_offset, full_xon_th);
1405 ecore_wr(p_hwfn, p_ptt,
1406 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
1407 reg_offset, pause_xoff_th);
1408 ecore_wr(p_hwfn, p_ptt,
1409 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
1410 reg_offset, pause_xon_th);
1411
1412 /* Init pause/full thresholds per physical TC - for
1413 * main traffic.
1414 */
1415 ecore_wr(p_hwfn, p_ptt,
1416 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
1417 reg_offset, full_xoff_th);
1418 ecore_wr(p_hwfn, p_ptt,
1419 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
1420 reg_offset, full_xon_th);
1421 ecore_wr(p_hwfn, p_ptt,
1422 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
1423 reg_offset, pause_xoff_th);
1424 ecore_wr(p_hwfn, p_ptt,
1425 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
1426 reg_offset, pause_xon_th);
1427 }
1428 }
1429 }
1430
1431 /* In MF should be called once per port to set EtherType of OuterTag */
1432 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1433 {
1434 /* Update DORQ register */
1435 STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1436 }
1437
1438 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
1439 (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
1440 #define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
1441 #define PRS_ETH_OUTPUT_FORMAT -46832
1442
1443 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1444 struct ecore_ptt *p_ptt, u16 dest_port)
1445 {
1446 /* Update PRS register */
1447 ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1448
1449 /* Update NIG register */
1450 ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1451
1452 /* Update PBF register */
1453 ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1454 }
1455
1456 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1457 struct ecore_ptt *p_ptt, bool vxlan_enable)
1458 {
1459 u32 reg_val;
1460
1461 /* Update PRS register */
1462 reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1463 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1464 PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
1465 vxlan_enable);
1466 ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1467 if (reg_val) { /* TODO: handle E5 init */
1468 reg_val = ecore_rd(p_hwfn, p_ptt,
1469 PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1470
1471 /* Update output only if tunnel blocks not included. */
1472 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1473 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1474 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1475 }
1476
1477 /* Update NIG register */
1478 reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1479 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1480 NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
1481 vxlan_enable);
1482 ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1483
1484 /* Update DORQ register */
1485 ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
1486 vxlan_enable ? 1 : 0);
1487 }
1488
1489 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1490 struct ecore_ptt *p_ptt,
1491 bool eth_gre_enable, bool ip_gre_enable)
1492 {
1493 u32 reg_val;
1494
1495 /* Update PRS register */
1496 reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1497 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1498 PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
1499 eth_gre_enable);
1500 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1501 PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
1502 ip_gre_enable);
1503 ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1504 if (reg_val) { /* TODO: handle E5 init */
1505 reg_val = ecore_rd(p_hwfn, p_ptt,
1506 PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1507
1508 /* Update output only if tunnel blocks not included. */
1509 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1510 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1511 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1512 }
1513
1514 /* Update NIG register */
1515 reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1516 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1517 NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
1518 eth_gre_enable);
1519 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1520 NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
1521 ip_gre_enable);
1522 ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1523
1524 /* Update DORQ registers */
1525 ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
1526 eth_gre_enable ? 1 : 0);
1527 ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
1528 ip_gre_enable ? 1 : 0);
1529 }
1530
1531 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1532 struct ecore_ptt *p_ptt, u16 dest_port)
1533 {
1534 /* Update PRS register */
1535 ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1536
1537 /* Update NIG register */
1538 ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1539
1540 /* Update PBF register */
1541 ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1542 }
1543
1544 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1545 struct ecore_ptt *p_ptt,
1546 bool eth_geneve_enable, bool ip_geneve_enable)
1547 {
1548 u32 reg_val;
1549
1550 /* Update PRS register */
1551 reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1552 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1553 PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
1554 eth_geneve_enable);
1555 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1556 PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
1557 ip_geneve_enable);
1558 ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1559 if (reg_val) { /* TODO: handle E5 init */
1560 reg_val = ecore_rd(p_hwfn, p_ptt,
1561 PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
1562
1563 /* Update output only if tunnel blocks not included. */
1564 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1565 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1566 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1567 }
1568
1569 /* Update NIG register */
1570 ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
1571 eth_geneve_enable ? 1 : 0);
1572 ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
1573 ip_geneve_enable ? 1 : 0);
1574
1575 /* EDPM with geneve tunnel not supported in BB */
1576 if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1577 return;
1578
1579 /* Update DORQ registers */
1580 ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
1581 eth_geneve_enable ? 1 : 0);
1582 ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
1583 ip_geneve_enable ? 1 : 0);
1584 }
1585
1586 #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
1587 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
1588
1589 void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
1590 struct ecore_ptt *p_ptt,
1591 bool enable)
1592 {
1593 u32 reg_val, cfg_mask;
1594
1595 /* read PRS config register */
1596 reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
1597
1598 /* set VXLAN_NO_L2_ENABLE mask */
1599 cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
1600
1601 if (enable) {
1602 /* set VXLAN_NO_L2_ENABLE flag */
1603 reg_val |= cfg_mask;
1604
1605 /* update PRS FIC register */
1606 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1607 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
1608 } else {
1609 /* clear VXLAN_NO_L2_ENABLE flag */
1610 reg_val &= ~cfg_mask;
1611 }
1612
1613 /* write PRS config register */
1614 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
1615 }
1616
1617 #define T_ETH_PACKET_ACTION_GFT_EVENTID 23
1618 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
1619 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1620 #define PARSER_ETH_CONN_CM_HDR 0
1621 #define CAM_LINE_SIZE sizeof(u32)
1622 #define RAM_LINE_SIZE sizeof(u64)
1623 #define REG_SIZE sizeof(u32)
1624
1625 void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
1626 struct ecore_ptt *p_ptt,
1627 u16 pf_id)
1628 {
1629 /* disable gft search for PF */
1630 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1631
1632 /* Clean RAM & CAM for the next GFT session */
1633
1634 /* Zero camline */
1635 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1636
1637 /* Zero ramline */
1638 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1639 RAM_LINE_SIZE * pf_id, 0);
1640 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1641 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
1642 }
1643
1644
1645 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1646 struct ecore_ptt *p_ptt)
1647 {
1648 u32 rfs_cm_hdr_event_id;
1649
1650 /* Set RFS event ID to be awakened in Tstorm by PRS */
1651 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1652 rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
1653 PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1654 rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
1655 PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1656 ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1657 }
1658
1659 void ecore_gft_config(struct ecore_hwfn *p_hwfn,
1660 struct ecore_ptt *p_ptt,
1661 u16 pf_id,
1662 bool tcp,
1663 bool udp,
1664 bool ipv4,
1665 bool ipv6,
1666 enum gft_profile_type profile_type)
1667 {
1668 u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
1669
1670 if (!ipv6 && !ipv4)
1671 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
1672 if (!tcp && !udp)
1673 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
1674 if (profile_type >= MAX_GFT_PROFILE_TYPE)
1675 DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
1676
1677 /* Set RFS event ID to be awakened in Tstorm by PRS */
1678 reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1679 PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1680 reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1681 ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1682
1683 /* Do not load context only cid in PRS on match. */
1684 ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1685
1686 /* Do not use tenant ID exist bit for gft search*/
1687 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1688
1689 /* Set Cam */
1690 cam_line = 0;
1691 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1692
1693 /* Filters are per PF!! */
1694 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
1695 GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1696 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1697
1698 if (!(tcp && udp)) {
1699 SET_FIELD(cam_line,
1700 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
1701 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1702 if (tcp)
1703 SET_FIELD(cam_line,
1704 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1705 GFT_PROFILE_TCP_PROTOCOL);
1706 else
1707 SET_FIELD(cam_line,
1708 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1709 GFT_PROFILE_UDP_PROTOCOL);
1710 }
1711
1712 if (!(ipv4 && ipv6)) {
1713 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1714 if (ipv4)
1715 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1716 GFT_PROFILE_IPV4);
1717 else
1718 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1719 GFT_PROFILE_IPV6);
1720 }
1721
1722 /* Write characteristics to cam */
1723 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
1724 cam_line);
1725 cam_line = ecore_rd(p_hwfn, p_ptt,
1726 PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
1727
1728 /* Write line to RAM - compare to filter 4 tuple */
1729 ram_line_lo = 0;
1730 ram_line_hi = 0;
1731
1732 /* Tunnel type */
1733 SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
1734 SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
1735
1736 if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
1737 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1738 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1739 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1740 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1741 SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
1742 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1743 } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
1744 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1745 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1746 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1747 } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
1748 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1749 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1750 } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
1751 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1752 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1753 } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
1754 SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
1755 }
1756
1757 ecore_wr(p_hwfn, p_ptt,
1758 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1759 ram_line_lo);
1760 ecore_wr(p_hwfn, p_ptt,
1761 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
1762 REG_SIZE, ram_line_hi);
1763
1764 /* Set default profile so that no filter match will happen */
1765 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1766 PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
1767 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1768 PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
1769
1770 /* Enable GFT search */
1771 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1772 }
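/* Illustrative usage sketch (not from the original file; the arguments shown,
 * such as the hwfn's relative PF id, are assumed caller-side values): enabling
 * GFT matching on the full TCP/IPv4 4-tuple for one PF might look like this:
 *
 *   ecore_gft_config(p_hwfn, p_ptt,
 *                    p_hwfn->rel_pf_id,        // PF owning the CAM/RAM line
 *                    true,  false,             // tcp, udp
 *                    true,  false,             // ipv4, ipv6
 *                    GFT_PROFILE_TYPE_4_TUPLE);
 */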
1773
1774 /* Configure VF zone size mode */
1775 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
1776 struct ecore_ptt *p_ptt, u16 mode,
1777 bool runtime_init)
1778 {
1779 u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1780 u32 msdm_vf_offset_mask;
1781
1782 if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1783 msdm_vf_size_log += 1;
1784 else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1785 msdm_vf_size_log += 2;
1786
1787 msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
1788
1789 if (runtime_init) {
1790 STORE_RT_REG(p_hwfn,
1791 PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
1792 msdm_vf_size_log);
1793 STORE_RT_REG(p_hwfn,
1794 PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
1795 msdm_vf_offset_mask);
1796 } else {
1797 ecore_wr(p_hwfn, p_ptt,
1798 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1799 ecore_wr(p_hwfn, p_ptt,
1800 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1801 }
1802 }
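/* Worked example (illustrative only; the actual value of
 * MSTORM_VF_ZONE_DEFAULT_SIZE_LOG comes from the HSI headers): if that default
 * log were 7, VF_ZONE_SIZE_MODE_QUAD would give msdm_vf_size_log = 9 and
 * msdm_vf_offset_mask = (1 << 9) - 1 = 0x1ff, which is what gets written to
 * the PGLUE_B shift/mask registers above.
 */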
1803
1804 /* Get mstorm queue statistics offset by VF zone size mode */
1805 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
1806 u16 stat_cnt_id,
1807 u16 vf_zone_size_mode)
1808 {
1809 u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1810
1811 if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
1812 (stat_cnt_id > MAX_NUM_PFS)) {
1813 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1814 offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1815 (stat_cnt_id - MAX_NUM_PFS);
1816 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1817 offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1818 (stat_cnt_id - MAX_NUM_PFS);
1819 }
1820
1821 return offset;
1822 }
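/* Worked example (illustrative arithmetic, again assuming
 * MSTORM_VF_ZONE_DEFAULT_SIZE_LOG == 7 purely for the numbers): for
 * stat_cnt_id == MAX_NUM_PFS + 2, DOUBLE mode adds (1 << 7) * 2 = 256 bytes on
 * top of MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id), while QUAD mode adds
 * 3 * (1 << 7) * 2 = 768 bytes.
 */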
1823
1824 /* Get mstorm VF producer offset by VF zone size mode */
1825 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
1826 u8 vf_id,
1827 u8 vf_queue_id,
1828 u16 vf_zone_size_mode)
1829 {
1830 u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1831
1832 if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1833 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1834 offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1835 vf_id;
1836 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1837 offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1838 vf_id;
1839 }
1840
1841 return offset;
1842 }
1843
1844 #ifndef LINUX_REMOVE
1845 #define CRC8_INIT_VALUE 0xFF
1846 #endif
1847 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1848
1849 /* Calculate and return CDU validation byte per connection type / region /
1850 * cid
1851 */
1852 static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
1853 {
1854 const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
1855
1856 static u8 crc8_table_valid; /* automatically initialized to 0 */
1857 u8 crc, validation_byte = 0;
1858 u32 validation_string = 0;
1859 u32 data_to_crc;
1860
1861 if (crc8_table_valid == 0) {
1862 OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1863 crc8_table_valid = 1;
1864 }
1865
1866 /*
1867 * The CRC is calculated on the String-to-compress:
1868 * [31:8] = {CID[31:20],CID[11:0]}
1869 * [7:4] = Region
1870 * [3:0] = Type
1871 */
1872 if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1873 validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1874
1875 if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1876 validation_string |= ((region & 0xF) << 4);
1877
1878 if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1879 validation_string |= (conn_type & 0xF);
1880
1881 /* Convert to big-endian and calculate CRC8 */
1882 data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1883
1884 crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
1885 CRC8_INIT_VALUE);
1886
1887 /* The validation byte [7:0] is composed:
1888 * for type A validation
1889 * [7] = active configuration bit
1890 * [6:0] = crc[6:0]
1891 *
1892 * for type B validation
1893 * [7] = active configuration bit
1894 * [6:3] = connection_type[3:0]
1895 * [2:0] = crc[2:0]
1896 */
1897
1898 validation_byte |= ((validation_cfg >>
1899 CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1900
1901 if ((validation_cfg >>
1902 CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1903 validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1904 else
1905 validation_byte |= crc & 0x7F;
1906
1907 return validation_byte;
1908 }
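/* Worked example (illustrative, assuming all USE_* bits are set in the default
 * validation configuration): for cid = 0x00123456, region = 3 and
 * conn_type = 2, the string-to-compress is
 *   (0x00123456 & 0xFFF00000) | ((0x00123456 & 0xFFF) << 8) | (3 << 4) | 2
 *   = 0x00100000 | 0x00045600 | 0x30 | 0x2 = 0x00145632.
 * The CRC8 (polynomial 0x07, init 0xFF) of its big-endian byte order then
 * supplies crc[6:0] for type A validation, or crc[2:0] alongside the
 * connection type for type B.
 */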
1909
1910 /* Calculate and set validation bytes for session context */
1911 void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
1912 u8 ctx_type, u32 cid)
1913 {
1914 u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1915
1916 p_ctx = (u8 *)p_ctx_mem;
1917 x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1918 t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1919 u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1920
1921 OSAL_MEMSET(p_ctx, 0, ctx_size);
1922
1923 *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
1924 *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
1925 *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
1926 }
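/* Illustrative caller-side sketch (not from this file; PROTOCOLID_ETH and cid
 * are used only as example arguments): when a connection context buffer is
 * first prepared, its validation bytes can be seeded with
 *
 *   ecore_calc_session_ctx_validation(p_cxt, cxt_size, PROTOCOLID_ETH, cid);
 *
 * and later zeroing of the same buffer should go through
 * ecore_memset_session_ctx() further below so those bytes are preserved.
 */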
1927
1928 /* Calculate and set validation bytes for task context */
1929 void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
1930 u32 tid)
1931 {
1932 u8 *p_ctx, *region1_val_ptr;
1933
1934 p_ctx = (u8 *)p_ctx_mem;
1935 region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1936
1937 OSAL_MEMSET(p_ctx, 0, ctx_size);
1938
1939 *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
1940 }
1941
1942 /* Memset session context to 0 while preserving validation bytes */
1943 void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1944 {
1945 u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1946 u8 x_val, t_val, u_val;
1947
1948 p_ctx = (u8 *)p_ctx_mem;
1949 x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1950 t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1951 u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1952
1953 x_val = *x_val_ptr;
1954 t_val = *t_val_ptr;
1955 u_val = *u_val_ptr;
1956
1957 OSAL_MEMSET(p_ctx, 0, ctx_size);
1958
1959 *x_val_ptr = x_val;
1960 *t_val_ptr = t_val;
1961 *u_val_ptr = u_val;
1962 }
1963
1964 /* Memset task context to 0 while preserving validation bytes */
1965 void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1966 {
1967 u8 *p_ctx, *region1_val_ptr;
1968 u8 region1_val;
1969
1970 p_ctx = (u8 *)p_ctx_mem;
1971 region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1972
1973 region1_val = *region1_val_ptr;
1974
1975 OSAL_MEMSET(p_ctx, 0, ctx_size);
1976
1977 *region1_val_ptr = region1_val;
1978 }
1979
1980 /* Enable and configure context validation */
1981 void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
1982 struct ecore_ptt *p_ptt)
1983 {
1984 u32 ctx_validation;
1985
1986 /* Enable validation for connection region 3 - bits [31:24] */
1987 ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
1988 ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
1989
1990 /* Enable validation for connection region 5 - bits [15:8] */
1991 ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1992 ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
1993
1994 /* Enable validation for connection region 1 - bits [15:8] */
1995 ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1996 ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
1997 }
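/* Worked values (these follow directly from CDU_VALIDATION_DEFAULT_CFG == 61,
 * i.e. 0x3d): the writes above program 0x3d000000 into CCFC_CTX_VALID0
 * (region 3, bits [31:24]) and 0x00003d00 into CCFC_CTX_VALID1 and
 * TCFC_CTX_VALID0 (regions 5 and 1, bits [15:8]).
 */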
1998
1999 #define RSS_IND_TABLE_BASE_ADDR 4112
2000 #define RSS_IND_TABLE_VPORT_SIZE 16
2001 #define RSS_IND_TABLE_ENTRY_PER_LINE 8
2002
2003 /* Update RSS indirection table entry. */
2004 void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
2005 struct ecore_ptt *p_ptt,
2006 u8 rss_id,
2007 u8 ind_table_index,
2008 u16 ind_table_value)
2009 {
2010 u32 cnt, rss_addr;
2011 u32 *reg_val;
2012 u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
2013 u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
2014
2015 /* get entry address */
2016 rss_addr = RSS_IND_TABLE_BASE_ADDR +
2017 RSS_IND_TABLE_VPORT_SIZE * rss_id +
2018 ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
2019
2020 /* prepare update command */
2021 ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
2022
2023 for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
2024 if (cnt == ind_table_index) {
2025 rss_ind_entry[cnt] = ind_table_value;
2026 rss_ind_mask[cnt] = 0xFFFF;
2027 } else {
2028 rss_ind_entry[cnt] = 0;
2029 rss_ind_mask[cnt] = 0;
2030 }
2031 }
2032
2033 /* Update entry in HW */
2034 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
2035
2036 reg_val = (u32 *)rss_ind_mask;
2037 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
2038 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
2039 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
2040 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
2041
2042 reg_val = (u32 *)rss_ind_entry;
2043 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
2044 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
2045 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
2046 ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
2047 }
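/* Illustrative usage sketch (not from the original file; ind_table_size,
 * ind_table[] and rss_id are placeholder caller-side values): a full
 * indirection table for one vport can be programmed one entry at a time:
 *
 *   u16 i;
 *
 *   for (i = 0; i < ind_table_size; i++)
 *       ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt, rss_id,
 *                                            (u8)i, ind_table[i]);
 */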