/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
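/* CRC-32C (Castagnoli) polynomial; used by the software CRC below for
 * approximate-multicast binning.
 */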
#define CRC32_POLY 0x1edc6f41

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
	if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, p_cid->cid);
	vfree(p_cid);
}

/* This internal helper is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      u8 vf_qid,
		      struct qed_queue_start_common_params *p_params)
{
	bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vmalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->vf_qid = vf_qid;
	p_cid->rel = *p_params;

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (b_is_same) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

	/* SBs relevant information was already provided as absolute */
	p_cid->abs.sb = p_cid->rel.sb;
	p_cid->abs.sb_idx = p_cid->rel.sb_idx;

	/* This is tricky - we're actually interested in whether this is a PF
	 * entry meant for the VF.
	 */
	if (!b_is_same)
		p_cid->is_vf = true;
out:
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params)
{
	struct qed_queue_cid *p_cid;
	u32 cid = 0;

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev)) {
		if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
	if (!p_cid && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, cid);

	return p_cid;
}

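/* Configure and post a VPORT_START ramrod for the given vport parameters */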
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}

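/* Translate the driver's RSS configuration into the vport-update ramrod's
 * RSS section, converting relative queue indices to engine-absolute ones.
 */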
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}

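/* Translate the accept-filter flags into the Rx/Tx mode state words of the
 * vport-update ramrod.
 */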
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

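/* Fill the vport-update ramrod's TPA/SGE aggregation parameters;
 * a NULL p_params clears the TPA update flags.
 */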
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

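/* Copy the caller-provided approximate-multicast bin vector into the
 * ramrod; only used when a PF updates a vport on behalf of its VFs.
 */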
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

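/* Build and post an RX_QUEUE_START ramrod for an already-allocated queue
 * CID; producer initialization is handled by the PF/VF-specific callers.
 */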
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
	p_ramrod->sb_index = p_cid->abs.sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->is_vf) {
		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   !!p_cid->b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to the queue handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
	p_ramrod->sb_index = p_cid->abs.sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	union qed_qm_pq_params pq_params;
	int rc;

	memset(&pq_params, 0, sizeof(pq_params));

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
						    &pq_params));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

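/* Store a MAC address in the firmware's three 16-bit words; within each
 * word the two bytes are swapped relative to network order.
 */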
static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

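/* Build the common part of a unicast-filter ramrod. MOVE and REPLACE
 * consume two filter commands; all other opcodes use a single one.
 */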
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 *	The computed CRC32 value
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

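/* CRC-32C over a 6-byte MAC address, zero-padded to the 8-byte multiple
 * required by qed_calc_crc32c().
 */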
static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

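/* Hash a MAC address into one of the 256 approximate-multicast bins */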
u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* The filter ADD op is an explicit set op and removes any existing
	 * filters for the vport.
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.eth.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
	p_stats->rx_crc_errors += port_stats.eth.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_stats->rx_pause_frames += port_stats.eth.rxpf;
	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
	p_stats->rx_align_errors += port_stats.eth.raln;
	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
	p_stats->rx_oversize_packets += port_stats.eth.rovr;
	p_stats->rx_jabbers += port_stats.eth.rjbr;
	p_stats->rx_undersize_packets += port_stats.eth.rund;
	p_stats->rx_fragments += port_stats.eth.rfrg;
	p_stats->tx_64_byte_packets += port_stats.eth.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
	p_stats->tx_pause_frames += port_stats.eth.txpf;
	p_stats->tx_pfc_frames += port_stats.eth.txpp;
	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
	p_stats->tx_total_collisions += port_stats.eth.tncl;
	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

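/* Device-wide statistics query: sums over all hwfns. A PF needs a PTT
 * window per hwfn to reach the storm RAM and releases it when done; a VF
 * runs the same flow with a NULL PTT.
 */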
static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* Translate the relative main vport (0) into the
			 * absolute FW vport index.
			 */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

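/* External stats query: reads the current counters, then subtracts the
 * baseline captured by the last qed_reset_vport_stats(). The subtraction
 * treats struct qed_eth_stats as a flat array of u64s, which holds only
 * as long as the struct contains nothing but u64 counters.
 */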
1a635e48 1679void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
86622ee7
YM
1680{
1681 u32 i;
1682
1683 if (!cdev) {
1684 memset(stats, 0, sizeof(*stats));
1685 return;
1686 }
1687
1688 _qed_get_vport_stats(cdev, stats);
1689
1690 if (!cdev->reset_stats)
1691 return;
1692
1693 /* Reduce the statistics baseline */
1694 for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1695 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1696}
1697
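/* V-port counters are cleared by writing zeroed shadow structures back
 * over their storm RAM locations. The port/MAC block cannot be cleared
 * that way, hence the baseline snapshot taken at the end.
 */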
/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

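/* Fills the protocol driver's view of the device: queue count (bounded by
 * both the available L2 queue-zones/contexts and the MSI-X vectors), the
 * MAC/VLAN filter counts net of the share reserved for VFs, and the port
 * MAC address.
 */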
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, tx, xdp] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				num_queues += min_t(u16, l2_queues, cids / 3);
			}

			/* Queues might theoretically be >256, but the
			 * interrupts' upper limit guarantees that it would
			 * fit in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
	} else {
		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
		if (cdev->num_hwfns > 1) {
			u8 queues = 0;

			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		memset(info->common.hw_mac, 0, ETH_ALEN);

	return 0;
}

cc875c2e 1826static void qed_register_eth_ops(struct qed_dev *cdev,
1408cc1f 1827 struct qed_eth_cb_ops *ops, void *cookie)
cc875c2e 1828{
1408cc1f
YM
1829 cdev->protocol_ops.eth = ops;
1830 cdev->ops_cookie = cookie;
1831
1832 /* For VF, we start bulletin reading */
1833 if (IS_VF(cdev))
1834 qed_vf_start_iov_wq(cdev);
cc875c2e
YM
1835}
1836
static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

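/* Starts the vport on every hwfn; on a CMT device the logical vport is
 * instantiated once per engine. Fastpath access on a hwfn is enabled only
 * after its vport-start ramrod completes.
 */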
cee4d264 1845static int qed_start_vport(struct qed_dev *cdev,
088c8618 1846 struct qed_start_vport_params *params)
cee4d264
MC
1847{
1848 int rc, i;
1849
1850 for_each_hwfn(cdev, i) {
088c8618 1851 struct qed_sp_vport_start_params start = { 0 };
cee4d264
MC
1852 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1853
088c8618
MC
1854 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
1855 QED_TPA_MODE_NONE;
1856 start.remove_inner_vlan = params->remove_inner_vlan;
08feecd7 1857 start.only_untagged = true; /* untagged only */
088c8618
MC
1858 start.drop_ttl0 = params->drop_ttl0;
1859 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
1860 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
1861 start.vport_id = params->vport_id;
1862 start.max_buffers_per_cqe = 16;
1863 start.mtu = params->mtu;
1864
1865 rc = qed_sp_vport_start(p_hwfn, &start);
cee4d264
MC
1866 if (rc) {
1867 DP_ERR(cdev, "Failed to start VPORT\n");
1868 return rc;
1869 }
1870
1871 qed_hw_start_fastpath(p_hwfn);
1872
1873 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1874 "Started V-PORT %d with MTU %d\n",
088c8618 1875 start.vport_id, start.mtu);
cee4d264
MC
1876 }
1877
a0d26d5a
YM
1878 if (params->clear_stats)
1879 qed_reset_vport_stats(cdev);
9df2ed04 1880
cee4d264
MC
1881 return 0;
1882}
1883
1a635e48 1884static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
cee4d264
MC
1885{
1886 int rc, i;
1887
1888 for_each_hwfn(cdev, i) {
1889 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1890
1891 rc = qed_sp_vport_stop(p_hwfn,
1a635e48 1892 p_hwfn->hw_info.opaque_fid, vport_id);
cee4d264
MC
1893
1894 if (rc) {
1895 DP_ERR(cdev, "Failed to stop VPORT\n");
1896 return rc;
1897 }
1898 }
1899 return 0;
1900}
1901
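/* Translates protocol-level vport-update parameters into the sp request.
 * RSS needs special care on CMT devices: the caller's indirection table
 * holds device-global queue indices, while queues are spread round-robin
 * across engines, so entries are reduced modulo a per-engine divisor (or
 * RSS is disabled outright when each engine owns just one queue). E.g.,
 * with two engines and a largest table entry of 5, the divisor is
 * (5 + 2 - 1) / 2 = 3, keeping every entry within an engine's 3 local
 * queues.
 */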
static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;

	/* RSS is a bit tricky, since the upper layer isn't familiar with
	 * hwfns; the RSS values need re-fixing per engine for CMT.
	 */
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss = &params->rss_params;
		int k, max = 0;

		/* Find the largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
			    max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
			    cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
				    rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now update the actual RSS configuration */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = params->rss_params.rss_caps;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
		sp_params.rss_params = &sp_rss_params;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}

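/* Rx/Tx queue starting: queues are spread round-robin across the engines
 * of a CMT device. rss_num picks the hwfn, and the device-global queue id
 * is scaled down to an engine-local one; the stats id follows the vport.
 */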
static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}

3da7a37a 2035static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
cee4d264
MC
2036{
2037 int rc, hwfn_index;
2038 struct qed_hwfn *p_hwfn;
2039
3da7a37a
MY
2040 hwfn_index = rss_id % cdev->num_hwfns;
2041 p_hwfn = &cdev->hwfns[hwfn_index];
cee4d264 2042
3da7a37a 2043 rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
cee4d264 2044 if (rc) {
3da7a37a 2045 DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
cee4d264
MC
2046 return rc;
2047 }
2048
2049 return 0;
2050}
2051
static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

3da7a37a 2093static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
cee4d264
MC
2094{
2095 struct qed_hwfn *p_hwfn;
2096 int rc, hwfn_index;
2097
3da7a37a
MY
2098 hwfn_index = rss_id % cdev->num_hwfns;
2099 p_hwfn = &cdev->hwfns[hwfn_index];
cee4d264 2100
3da7a37a 2101 rc = qed_eth_tx_queue_stop(p_hwfn, handle);
cee4d264 2102 if (rc) {
3da7a37a 2103 DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
cee4d264
MC
2104 return rc;
2105 }
2106
2107 return 0;
2108}
2109
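/* Propagates updated VXLAN/GENEVE UDP destination ports to the firmware
 * through a PF-update ramrod on each hwfn. Tunnel ports are a PF-wide
 * setting, so VFs return immediately.
 */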
static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunn_update_params tunn_info;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port == 1) {
		tunn_info.update_vxlan_udp_port = 1;
		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port == 1) {
		tunn_info.update_geneve_udp_port = 1;
		tunn_info.geneve_udp_port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);

		if (rc)
			return rc;
	}

	return 0;
}

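/* Maps the coarse rx-mode requested by the protocol driver onto accept
 * flags: matched unicast/multicast plus broadcast are always accepted,
 * while the promiscuous modes additionally accept unmatched traffic.
 */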
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

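/* Single dispatch point for filtering requests from the protocol driver;
 * fans out to the unicast, multicast and rx-mode handlers above.
 */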
static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

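/* The L2 ops table exported to the qede driver. The SR-IOV and DCB
 * sub-tables are present only when the corresponding kernel options are
 * configured.
 */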
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
};

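/* A minimal, illustrative sketch of how a protocol driver would consume
 * this table (local names below are hypothetical, not taken from qede):
 *
 *	const struct qed_eth_ops *qed_ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info dev_info;
 *	int rc;
 *
 *	if (!qed_ops)
 *		return -EINVAL;
 *	rc = qed_ops->fill_dev_info(cdev, &dev_info);
 */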
const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);