/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
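
/* Interfaces of the connection-management (CM) blocks (MCM, UCM, TCM,
 * YCM, XCM); the _SEC/_PRI suffixes are taken to mean the secondary and
 * primary interface of each block.
 */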
enum cminterface {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};

/* general constants */
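/* Number of 4KB pages needed for a PQ with pq_size entries: (pq_size + 1)
 * elements of QM_PQ_ELEMENT_SIZE bytes each, rounded up to whole 4KB
 * pages; a zero-sized PQ consumes no memory.
 */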
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
					QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
						0x100) - 1 : 0)
#define QM_INVALID_PQ_ID	0xffff
/* feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF	4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND	62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT	0
#define QM_WFQ_VP_PQ_PF_SHIFT	5
#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL	43750000

/* RL constants */
#define QM_RL_UPPER_BOUND	62500000
#define QM_RL_PERIOD	5	/* in us */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL	43750000
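/* The RL increment is the byte credit added per RL period:
 * rate * QM_RL_PERIOD / 8, plus 1% headroom (the 101/100 factor), where
 * the rate is assumed to be given in Mb/s (so Mb/s * us / 8 yields
 * bytes). A zero rate falls back to 1000000, i.e. effectively unlimited.
 */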
#define QM_RL_INC_VAL(rate)	max_t(u32, \
				      (u32)(((rate ? rate : 1000000) * \
					     QM_RL_PERIOD * 101) / (8 * 100)), 1)
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES	150
#define PBF_CMDQ_LINES_RT_OFFSET(voq)	( \
	PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + (voq) * \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
	 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)	( \
	PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + (voq) * \
	(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
	 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines)	((((pbf_cmd_lines) - 4) * 2) | \
					 QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS	38
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10
#define BTB_PURE_LB_RATIO	7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH	32
#define QM_STOP_CMD_ADDR	0x2
#define QM_STOP_CMD_STRUCT_SIZE	2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], cmd ## _ ## field, value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port)	((port) * \
							 (max_phys_tcs_per_port) + \
							 (tc))
#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : \
	 LB_VOQ(port))
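/* A (port, TC) pair selects a virtual output queue (VOQ): each physical
 * TC maps to a per-port physical VOQ, while the pure LB TC maps to a
 * per-port loopback VOQ located after all physical VOQs.
 */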
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;

			/* find #lines to divide between active phys TCs */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}

			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) != 1)
					continue;

				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}

			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
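
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port: headroom blocks are reserved first, a share is then
 * carved out for the pure LB VOQ, and the remainder is split evenly
 * between the port's active physical TCs.
 */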
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;

		if (!port_params[port_id].active)
			continue;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* find blocks per physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		}

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) != 1)
				continue;

			voq = PHYS_VOQ(port_id, tc,
				       max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}

		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;
		bool rl_valid = p_params->pq_params[i].rl_valid &&
				(p_params->pq_params[i].vport_id <
				 MAX_QM_GLOBAL_RLS);

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}

		if (p_params->pq_params[i].rl_valid && !rl_valid)
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT ID for rate limiter configuration\n");
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg,
			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  rl_valid ? p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
				BIT(pq_id % QM_PF_QUEUE_GROUP_SIZE);
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			u32 addr;

			addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
			STORE_RT_REG(p_hwfn, addr, tx_pq_vf_mask[i]);
		}
	}
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
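
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */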
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}
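
/* Poll for the QM SDM command-ready indication, for up to
 * QM_STOP_CMD_MAX_POLL_COUNT * QM_STOP_CMD_POLL_PERIOD_US microseconds.
 * Return true if the QM is ready to accept a new command.
 */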
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}
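
/* Send a command to the QM: wait for the SDM to become ready, write the
 * command address and data, pulse the GO bit, and wait for completion.
 */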
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
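/* Return the amount of memory (in 4KB pages) needed for the PF's queues:
 * one region per PF Tx PQ, one per VF Tx PQ, and one per Other PQ.
 */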
u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);

	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}
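
/* Configure the WFQ weight of the given PF at runtime, writing the new
 * increment value directly to the QM register.
 */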
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}
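
/* Configure the rate limit of the given PF at runtime: reset the RL
 * credit and program the new increment value.
 */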
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
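
/* Configure the WFQ weight of the given VPORT at runtime, programming
 * the weight for each of the VPORT's per-TC PQs.
 */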
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	u8 tc;

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];

		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn, p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
			       inc_val);
	}

	return 0;
}
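
/* Configure the rate limit of the given VPORT at runtime: reset the
 * global RL credit and program the new increment value.
 */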
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (vport_id >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn,
			  "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
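
/* Stop or release a range of PQs. PQs are handled in groups of
 * QM_STOP_PQ_MASK_WIDTH: one QM command is sent per group, with a bit
 * set in the pause mask for each PQ to stop (for a release command the
 * mask is left clear).
 */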
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}

static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008
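
/* Set the UDP destination port used to classify VXLAN packets in the
 * parser (PRS), NIG and PBF blocks.
 */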
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with GENEVE tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
	       ip_geneve_enable ? 1 : 0);
}

#define T_ETH_PACKET_MATCH_RFS_EVENTID	25
#define PARSER_ETH_CONN_CM_HDR	(0x0)
#define CAM_LINE_SIZE	sizeof(u32)
#define RAM_LINE_SIZE	sizeof(u64)
#define REG_SIZE	sizeof(u32)
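
/* Disable GFT-based RFS classification for the given PF: clear the
 * parser GFT enable, the PF's GFT CAM line and its profile-mask RAM
 * line.
 */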
void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 pf_id)
{
	union gft_cam_line_union camline;
	struct gft_ram_line ramline;
	u32 *p_ramline, i;

	p_ramline = (u32 *)&ramline;

	/* stop using GFT logic */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
	memset(&camline, 0, sizeof(union gft_cam_line_union));
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       camline.cam_line_mapped.camline);
	memset(&ramline, 0, sizeof(ramline));

	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
		u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;

		hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);

		qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
	}
}
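
/* Enable GFT-based RFS classification for the given PF: program the
 * parser CM header and event ID, write the PF's CAM line describing
 * which protocol and IP-version combinations to match, and set the
 * profile RAM line to a 4-tuple (src/dst IP, src/dst port) match.
 */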
void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			     u16 pf_id, bool tcp, bool udp,
			     bool ipv4, bool ipv6)
{
	u32 rfs_cm_hdr_event_id, *p_ramline;
	union gft_cam_line_union camline;
	struct gft_ram_line ramline;
	int i;

	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	p_ramline = (u32 *)&ramline;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "set_rfs_mode_enable: must accept at least one of - udp or tcp\n");

	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

	/* Configure Registers for RFS mode */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
	camline.cam_line_mapped.camline = 0;

	/* CAM line is now valid */
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_VALID, 1);

	/* filters are per PF */
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
	SET_FIELD(camline.cam_line_mapped.camline,
		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
	if (!(tcp && udp)) {
		SET_FIELD(camline.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
		if (tcp)
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(camline.cam_line_mapped.camline,
			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(camline.cam_line_mapped.camline,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       camline.cam_line_mapped.camline);
	camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
						 PRS_REG_GFT_CAM +
						 CAM_LINE_SIZE * pf_id);

	/* write line to RAM - compare to filter 4 tuple */
	ramline.low32bits = 0;
	ramline.high32bits = 0;
	SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);

	/* each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		qed_wr(p_hwfn, p_ptt,
		       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
		       i * REG_SIZE, *(p_ramline + i));

	/* set default profile so that no filter match will happen */
	ramline.low32bits = 0xffff;
	ramline.high32bits = 0xffff;

	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		qed_wr(p_hwfn, p_ptt,
		       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
		       PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
		       *(p_ramline + i));
}