/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "ecore_reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
enum CmInterfaceEnum {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};
/* general constants */
#define QM_PQ_MEM_4KB(pq_size) \
	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
	(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
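/* Illustrative arithmetic only (not part of the original sources), assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes: a PQ sized for 512 CIDs needs
 * QM_PQ_MEM_4KB(512) = DIV_ROUND_UP((512 + 1) * 4, 0x1000) = 1 page of 4KB,
 * and QM_PQ_SIZE_256B(512) = DIV_ROUND_UP(512, 0x100) - 1 = 1, since the
 * size register holds the PQ size in 256B units, minus one.
 */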
/* feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL 43750000
/* RL constants */
#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL 43750000
/* RL increment value - the factor of 1.01 was added after seeing only
 * 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
 * In this scenario the PF RL was reducing the line rate to 99% although
 * the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is
 * unknown at this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * QM_RL_PERIOD * 101) / \
	(8 * 100)), 1)
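/* Worked example (illustrative, assuming the rate argument is in kbps as
 * used by the ecore callers): for a 25G port, rate = 25000000, so
 * QM_RL_INC_VAL(25000000) = (25000000 * 5 * 101) / 800 = 15781250 credits
 * per RL period. A rate of 0 falls back to 1000000, and the OSAL_MAX_T
 * guarantees an increment of at least 1.
 */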
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
/* headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 0x2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
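/* For reference (derived from the offsets/shifts above, not taken from the
 * original sources): the 2-dword QM stop command is laid out as
 *   cmd_arr[0]        - PAUSE_MASK, one bit per PQ in a group of 32
 *   cmd_arr[1][19:16] - GROUP_ID, selects which group of 32 PQs
 *   cmd_arr[1][24]    - PQ_TYPE, 0 for Tx PQs, 1 for Other PQs
 * ecore_send_qm_stop_cmd() below fills these fields via QM_CMD_SET_FIELD.
 */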
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
	((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
	 LB_VOQ(port))
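/* Example (illustrative only): with max_phys_tcs_per_port = 4, port 1 / TC 2
 * maps to physical VOQ 1 * 4 + 2 = 6, while the pure loopback TC (LB_TC and
 * above) of port 1 maps to VOQ MAX_PHYS_VOQS + 1.
 */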
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
				   bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;
	/* In A0 - Limit the size of pbf queue so that only 511 commands
	 * with the minimum size of 4 (FCoE minimum size)
	 */
	bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
	if (is_bb_a0)
		cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;
	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			/* find #lines to divide between active physical TCs */
			phys_lines =
			    port_params[port_id].num_pbf_cmd_lines -
			    PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}
			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1) {
					voq = PHYS_VOQ(port_id, tc,
						       max_phys_tcs_per_port);
					ecore_cmdq_lines_voq_rt_init(p_hwfn,
							voq, phys_lines_per_tc);
				}
			}
			/* init registers for pure LB TC */
			ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						     PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
/**
 * Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *     B - BTB blocks for this port
 *     C - Number of physical TCs for this port
 * 2. Calculation:
 *     a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *        headroom.
 *     b. B = B - 38 (remainder after global headroom allocation).
 *     c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *     d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *     e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
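/* Rough worked example of the steps above (illustrative numbers only):
 * with B = 1000 blocks and C = 4 physical TCs:
 *   a+b. B = 1000 - 38 = 962 after the per-port headroom
 *   c.   pure LB gets MAX(38, (962 * 10) / (4 * 10 + 7)) = 204 blocks
 *   d.   B = 962 - 204 = 758
 *   e.   each physical TC gets 758 / 4 = 189 blocks (integer division)
 */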
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params
				     port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			/* subtract headroom blocks */
			usable_blocks =
			    port_params[port_id].num_btb_blocks -
			    BTB_HEADROOM_BLOCKS;
			/* find blocks per physical TC. use factor to avoid
			 * floating arithmetic
			 */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			pure_lb_blocks =
			    (usable_blocks * BTB_PURE_LB_FACTOR) /
			    (num_tcs_in_port * BTB_PURE_LB_FACTOR +
			     BTB_PURE_LB_RATIO);
			pure_lb_blocks =
			    OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
			phys_blocks =
			    (usable_blocks - pure_lb_blocks) / num_tcs_in_port;
			/* init physical TCs */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1) {
					voq = PHYS_VOQ(port_id, tc,
						       max_phys_tcs_per_port);
					STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET(voq),
					     phys_blocks);
				}
			}
			/* init pure LB TC */
			STORE_RT_REG(p_hwfn,
				     PBF_BTB_GUARANTEED_RT_OFFSET(
					LB_VOQ(port_id)), pure_lb_blocks);
		}
	}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_first_pf,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	u16 i, pq_id, pq_group;
	u16 num_pqs = num_pf_pqs + num_vf_pqs;
	u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(num_vf_cids));
	/* go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		struct qm_rf_pq_map tx_pq_map;
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		bool is_vf_pq = (i >= num_pf_pqs);
		/* added to avoid compilation warning */
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		bool rl_valid = pq_params[i].rl_valid &&
		    pq_params[i].vport_id < max_qm_global_rls;
		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
		u16 first_tx_pq_id =
		    vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
								tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			vport_params[vport_id_in_pf].
			    first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
							QM_WFQ_VP_PQ_PF_SHIFT));
		}
		if (pq_params[i].rl_valid && pq_params[i].vport_id >=
		    max_qm_global_rls)
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT ID for rate limiter config");
		/* fill PQ map entry */
		OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  rl_valid ? pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication to PQ
			 * VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
			    (1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}
	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			if (is_bb_a0) {
				/* A0-only: perform read-modify-write
				 * (fixed in B0)
				 */
				u32 curr_mask =
				    is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
						       QM_REG_MAXPQSIZETXSEL_0 +
							       i * 4);
				STORE_RT_REG(p_hwfn,
					     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
					     i, curr_mask | tx_pq_vf_mask[i]);
			} else {
				STORE_RT_REG(p_hwfn,
					     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
					     i, tx_pq_vf_mask[i]);
			}
		}
	}
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 port_id,
				       u8 pf_id,
				       u32 num_pf_cids,
				       u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;
	/* a single other PQ group is used in each PF, where PQ group i is used
	 * in PF i
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;
	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u16 i;
	u32 inc_val;
	u32 crd_reg_offset =
	    (pf_id <
	     MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
	     QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq =
		    VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs. Return -1 on
 * error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 tc, i;
	u32 inc_val;
	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (vport_params[i].vport_wfq) {
			inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
			if (inc_val > QM_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn, true,
					  "Invalid VPORT WFQ weight config");
				return -1;
			}
			/* each VPORT can have several VPORT PQ IDs for
			 * different TCs
			 */
			for (tc = 0; tc < NUM_OF_TCS; tc++) {
				u16 vport_pq_id =
				    vport_params[i].first_tx_pq_id[tc];
				if (vport_pq_id != QM_INVALID_PQ_ID) {
					STORE_RT_REG(p_hwfn,
						     QM_REG_WFQVPCRD_RT_OFFSET +
						     vport_pq_id,
						  (u32)QM_WFQ_CRD_REG_SIGN_BIT);
					STORE_RT_REG(p_hwfn,
						  QM_REG_WFQVPWEIGHT_RT_OFFSET
						  + vport_pq_id, inc_val);
				}
			}
		}
	}
	return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration");
		return -1;
	}
	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}
	return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;
	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}
	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
			   "Timeout waiting for QM SDM cmd ready signal\n");
		return false;
	}
	return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u8 pf_id,
			 u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
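/* Example (illustrative values only, assuming QM_PQ_ELEMENT_SIZE of 4 bytes
 * as in the note above): with num_pf_cids = 64, num_vf_cids = 32,
 * num_tids = 0, 16 PF PQs and no VF PQs, QM_PQ_MEM_4KB(64) = 1 and
 * QM_PQ_MEM_4KB(32) = 1, so the PF needs
 * 1 * 16 + 1 * 0 + 1 * QM_OTHER_PQS_PER_PF = 20 pages of 4KB.
 */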
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params
			    port_params[MAX_NUM_PORTS])
{
	/* init AFullOprtnstcCrdMask */
	u32 mask =
	    (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	    (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	    (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	    (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	    (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	    (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	    (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	    (QM_OPPOR_PQ_EMPTY_DEF <<
	     QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	/* enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);
	/* enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
	/* enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);
	/* enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
	/* init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	/* init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
				 max_phys_tcs_per_port, port_params);
	return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_first_pf,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u8 tc, i;
	u32 other_mem_size_4kb =
	    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
	/* map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
				   num_tids, 0);
#endif
	/* map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
				max_phys_tcs_per_port, is_first_pf, num_pf_cids,
				num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
				start_vport, other_mem_size_4kb, pq_params,
				vport_params);
	/* init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init
		    (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
		     num_pf_pqs + num_vf_pqs, pq_params) != 0)
			return -1;
	/* init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
		return -1;
	/* set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
		return -1;
	/* set VPORT RL */
	if (ecore_vport_rl_rt_init
	    (p_hwfn, start_vport, num_vports, vport_params) != 0)
		return -1;
	return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}
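/* Note on the valid weight range (derived from the constants above):
 * QM_WFQ_INC_VAL(weight) = weight * 0x9000, so with QM_WFQ_MAX_INC_VAL of
 * 43750000 any pf_wfq above 43750000 / 0x9000 ~= 1186 is rejected, as is 0.
 */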
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
	return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u8 tc;
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT WFQ weight configuration");
		return -1;
	}
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt,
				 QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}
	return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT ID for rate limiter configuration");
		return -1;
	}
	inc_val = QM_RL_INC_VAL(vport_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true,
			  "Invalid VPORT rate-limit configuration");
		return -1;
	}
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
		 (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
	return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
	/* go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
					 pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd
			    (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
			     cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}
	return true;
}
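/* Hedged usage sketch (illustrative only, not from the original sources):
 * pausing and later releasing the first 64 Tx PQs of a PF could look like
 *
 *	if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
 *		return false;
 *	... drain or reconfigure the queues ...
 *	if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64))
 *		return false;
 *
 * where the third argument is is_release_cmd and the fourth is is_tx_pq.
 */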
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
/* NIG: RL constants */
#define NIG_RL_BASE_TYPE 1 /* byte base type */
#define NIG_RL_PERIOD 1 /* in us */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
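/* Illustrative arithmetic (assuming the rate argument is in Mbps): with a
 * 1 us period, a rate in Mbps equals bits per period, so
 * NIG_RL_INC_VAL(10000) = 10000 / 8 = 1250 byte credits per period for a
 * 10G limit, and NIG_RL_MAX_VAL caps the credit bucket at twice the larger
 * of the increment and the MTU.
 */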
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req, bool is_lb)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	u8 tc_client_offset =
	    is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_weight_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
	    NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_base_addr =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	u32 tc_bound_addr_diff =
	    is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
	    NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
		 NIG_REG_TX_ARB_CLIENT_IS_STRICT,
		 (sp_tc_map << tc_client_offset));
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt,
		 is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
		 NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
		 (wfq_tc_map << tc_client_offset));
	/* write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 tc_weight_base_addr +
				 tc_weight_addr_diff * tc_client_offset,
				 byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 tc_bound_base_addr +
				 tc_bound_addr_diff * tc_client_offset,
				 NIG_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u8 tc;
	u32 ctrl, inc_val, reg_offset;
	/* disable global MAC+LB RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	/* configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 <<
		    NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}
	/* disable global LB-only RL */
	ctrl =
	    NIG_RL_BASE_TYPE <<
	    NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	/* configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
			 NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
			 inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
			 NIG_RL_MAX_VAL(inc_val, req->mtu));
		/* enable */
		ctrl |=
		    1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}
	/* per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
	     tc++, reg_offset += 4) {
		/* disable TC RL */
		ctrl =
		    NIG_RL_BASE_TYPE <<
		NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
		/* configure and enable TC RL */
		if (req->tc_rate[tc]) {
			/* configure */
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
				 reg_offset, NIG_RL_PERIOD_CLK_25M);
			inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
				 reg_offset, inc_val);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
				 reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
			/* enable */
			ctrl |=
			    1 <<
			NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
				 ctrl);
		}
	}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 pri, tc;
	u32 pri_tc_mask = 0;
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (req->pri[pri].valid) {
			pri_tc_mask |= (req->pri[pri].tc_id <<
					(pri * NIG_PRIORITY_MAP_TC_BITS));
			tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
		}
	}
	/* write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
	/* write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
			 tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
			 tc_pri_mask[tc]);
	}
}
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
	u32 min_weight = 0xffffffff;
	u32 tc_weight_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	u32 tc_bound_addr_diff =
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
	    PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		/* update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);
		if (tc_req->use_wfq) {
			/* update WFQ map */
			wfq_tc_map |= (1 << tc);
			/* find minimal weight */
			if (tc_req->weight < min_weight)
				min_weight = tc_req->weight;
		}
	}
	/* write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
	/* write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
		 wfq_tc_map);
	/* write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		if (tc_req->use_wfq) {
			/* translate weight to bytes */
			u32 byte_weight =
			    (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
			    min_weight;
			/* write WFQ weight */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
				 tc * tc_weight_addr_diff, byte_weight);
			/* write WFQ upper bound */
			ecore_wr(p_hwfn, p_ptt,
				 PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
				 tc * tc_bound_addr_diff,
				 PRS_ETS_UP_BOUND(byte_weight, req->mtu));
		}
	}
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128 /* in bytes */
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
/*
 * temporary big RAM allocation - should be updated
 */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
	u8 port, active_ports = 0;
	u32 active_port_blocks, reg_offset = 0;
	u32 tc_headroom_blocks =
	    (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
	u32 min_pkt_size_blocks =
	    (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
	u32 total_blocks =
	    ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
	    BRB_TOTAL_RAM_BLOCKS_BB;
	/* find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;
	active_port_blocks = (u32)(total_blocks / active_ports);
	for (port = 0; port < req->max_ports_per_engine; port++) {
		/* calculate per-port sizes */
		u32 tc_guaranteed_blocks =
		    (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		u32 port_blocks =
		    req->num_active_tcs[port] ? active_port_blocks : 0;
		u32 port_guaranteed_blocks =
		    req->num_active_tcs[port] * tc_guaranteed_blocks;
		u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
		u32 full_xoff_th =
		    req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
		u32 pause_xoff_th = tc_headroom_blocks;
		u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
		u8 tc;
		/* init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
			 port_blocks);
		/* init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
			 port_shared_blocks);
		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}
			/* init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_TC_GUARANTIED_0 + reg_offset,
				 tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
				 BRB_HYST_BLOCKS);
			/* init pause/full thresholds per physical TC - for
			 * loopback traffic
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
			/* init pause/full thresholds per physical TC - for
			 * main traffic
			 */
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
				 reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
				 reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
				 reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt,
				 BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
				 reg_offset, pause_xon_th);
		}
	}
}
/* In MF should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, u32 ethType)
{
	/* update PRS register */
	STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
	/* update NIG register */
	STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
	/* update PBF register */
	STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
/* In MF should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt, u32 ethType)
{
	/* update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
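/* Expansion example (illustrative only):
 * SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 3, true) first clears bit 3 of
 * reg_val and then sets it, i.e.
 * reg_val = (reg_val & ~(1 << 3)) | (1 << 3); passing false leaves the bit
 * cleared. This lets one register hold independent enable bits for several
 * tunnel types.
 */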
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
			   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
				   NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
				   vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
		 vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		   eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		   ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
			   eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
			   NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
			   ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
		 eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
		 ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 dest_port)
{
	/* geneve tunnel not supported in BB_A0 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;
	/* update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	/* update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;
	/* geneve tunnel not supported in BB_A0 */
	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;
	/* update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
		   eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
		   PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
		   ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			 (u32)PRS_ETH_TUNN_FIC_FORMAT);
	}
	/* update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
		 ip_geneve_enable ? 1 : 0);
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;
	/* update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
		 eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
		 ip_geneve_enable ? 1 : 0);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR (0x0)
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	/* set RFS event ID to be awakened in Tstorm by PRS */
	u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 pf_id,
			       bool tcp, bool udp, bool ipv4, bool ipv6)
{
	u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	union gft_cam_line_union camLine;
	struct gft_ram_line ramLine;
	u32 *ramLinePointer = (u32 *)&ramLine;
	int i;
	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true,
			  "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true,
			  "set_rfs_mode_enable: must accept at least one of - udp or tcp");
	/* set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
	    PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
	    PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
	/* Configure Registers for RFS mode */
	/* enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	/* do not load context only cid in PRS on match */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
	camLine.cam_line_mapped.camline = 0;
	/* cam line is now valid!! */
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_VALID, 1);
	/* filters are per PF!! */
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
	SET_FIELD(camLine.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
	if (!(tcp && udp)) {
		SET_FIELD(camLine.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
		if (tcp)
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}
	if (!(ipv4 && ipv6)) {
		SET_FIELD(camLine.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(camLine.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}
	/* write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
		 camLine.cam_line_mapped.camline);
	camLine.cam_line_mapped.camline =
	    ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
	/* write line to RAM - compare to filter 4 tuple */
	ramLine.low32bits = 0;
	ramLine.high32bits = 0;
	SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
	/* each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * pf_id +
			 i * REG_SIZE, *(ramLinePointer + i));
	/* set default profile so that no filter match will happen */
	ramLine.low32bits = 0xffff;
	ramLine.high32bits = 0xffff;
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
			 RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
			 i * REG_SIZE, *(ramLinePointer + i));
}
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt, u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;
	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;
	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
	if (runtime_init) {
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
			     msdm_vf_size_log);
		STORE_RT_REG(p_hwfn,
			     PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
			     msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt,
			 PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}
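/* Illustrative arithmetic: starting from the default log2 zone size,
 * VF_ZONE_SIZE_MODE_DOUBLE adds 1 and VF_ZONE_SIZE_MODE_QUAD adds 2, so the
 * byte offset mask grows from (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) - 1 to
 * twice or four times that size, minus one; the two accessors below scale
 * their per-VF offsets by the same factors.
 */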
/* get mstorm statistics for offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
	    (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    (stat_cnt_id - MAX_NUM_PFS);
	}
	return offset;
}
/* get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
			    vf_id;
	}
	return offset;
}