/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};

int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}

void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until the first uninitialized entry is hit */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}
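
/* Illustrative lifecycle sketch (not part of the driver): per-hwfn init
 * paths are expected to pair these helpers roughly as follows -
 *
 *	if (qed_l2_alloc(p_hwfn))	// allocate qid-usage bitmaps
 *		goto err;
 *	qed_l2_setup(p_hwfn);		// init the qid-usage mutex
 *	...
 *	qed_l2_free(p_hwfn);		// teardown, safe on partial alloc
 *
 * Note qed_l2_free() tolerates a partially-completed qed_l2_alloc(),
 * which is why qed_l2_alloc() can return -ENOMEM without cleaning up.
 */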

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
	if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, p_cid->cid);
	vfree(p_cid);
}

/* This internal function is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      u8 vf_qid,
		      struct qed_queue_start_common_params *p_params)
{
	bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vmalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->vf_qid = vf_qid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->sb_idx = p_params->sb_idx;

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (b_is_same) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

	/* This is tricky - we're actually interested in whether this is a PF
	 * entry meant for the VF.
	 */
	if (!b_is_same)
		p_cid->is_vf = true;
out:
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params)
{
	struct qed_queue_cid *p_cid;
	u32 cid = 0;

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by the PF.
	 */
	if (IS_PF(p_hwfn->cdev)) {
		if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
	if (!p_cid && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, cid);

	return p_cid;
}
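
/* Illustrative sketch (assumption, not part of this file): the queue-start
 * flows below build a CID handle from common queue parameters, e.g. -
 *
 *	struct qed_queue_start_common_params params = {
 *		.vport_id = 0,		// relative vport
 *		.queue_id = 0,		// relative queue within the vport
 *		.stats_id = 0,
 *		.p_sb = p_sb,		// status block info
 *		.sb_idx = 0,		// protocol index inside the SB
 *	};
 *	struct qed_queue_cid *p_cid;
 *
 *	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, &params);
 *
 * On failure the firmware CID (PF-only) is released before returning,
 * so callers never need to clean up a NULL handle.
 */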

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}
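
/* Illustrative sketch (assumption, not part of this file): a caller
 * starting a vport fills qed_sp_vport_start_params with the fields
 * consumed above and invokes the PF/VF-agnostic wrapper -
 *
 *	struct qed_sp_vport_start_params start = {
 *		.vport_id = 0,
 *		.opaque_fid = p_hwfn->hw_info.opaque_fid,
 *		.mtu = 1500,
 *		.remove_inner_vlan = false,
 *		.drop_ttl0 = false,
 *		.only_untagged = false,
 *		.tpa_mode = QED_TPA_MODE_GRO,
 *	};
 *
 *	rc = qed_sp_vport_start(p_hwfn, &start);
 *
 * The wrapper routes VFs through the PF channel and PFs straight to the
 * VPORT_START ramrod; until rx/tx modes are updated, the vport starts
 * with unicast and multicast DROP_ALL set.
 */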

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}
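
/* Illustrative sketch (assumption, not part of this file): the RSS
 * indirection table spreads flows over the rx-queue CIDs, so a caller
 * enabling RSS over num_rxqs queues might prepare -
 *
 *	struct qed_rss_params rss = {
 *		.update_rss_config = 1,
 *		.rss_enable = 1,
 *		.update_rss_capabilities = 1,
 *		.rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
 *			    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP,
 *		.update_rss_ind_table = 1,
 *		.rss_table_size_log = 7,	// 128 entries
 *	};
 *
 *	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *		rss.rss_ind_table[i] = rxq_cids[i % num_rxqs];
 *
 * where rxq_cids[] (hypothetical) holds the qed_queue_cid handles
 * returned when the rx queues were started.
 */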

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
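
/* Mapping sketch (derived from the logic above, for reference only):
 *
 *	rx_accept_filter bits			-> rx_mode.state bit
 *	----------------------------------------------------------------
 *	neither UCAST_MATCHED nor UCAST_UNMATCHED -> UCAST_DROP_ALL
 *	UCAST_UNMATCHED				-> UCAST_ACCEPT_UNMATCHED
 *	neither MCAST_MATCHED nor MCAST_UNMATCHED -> MCAST_DROP_ALL
 *	MCAST_MATCHED && MCAST_UNMATCHED	-> MCAST_ACCEPT_ALL
 *	BCAST					-> BCAST_ACCEPT_ALL
 */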

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return the spq entry which was taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
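
/* Illustrative sketch (assumption, not part of this file): a promiscuous
 * rx configuration would be requested through this helper roughly as -
 *
 *	struct qed_filter_accept_flags flags = {
 *		.update_rx_mode_config = 1,
 *		.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *				    QED_ACCEPT_UCAST_UNMATCHED |
 *				    QED_ACCEPT_MCAST_MATCHED |
 *				    QED_ACCEPT_MCAST_UNMATCHED |
 *				    QED_ACCEPT_BCAST,
 *	};
 *
 *	rc = qed_filter_accept_cmd(cdev, 0, flags, 0, 0,
 *				   QED_SPQ_MODE_EBLOCK, NULL);
 */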

int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->is_vf) {
		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   !!p_cid->b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to use as a handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
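
/* Illustrative sketch (assumption, not part of this file): once an rx
 * queue has been started, the caller owns an opaque handle and the
 * producer address returned in qed_rxq_start_ret_params -
 *
 *	struct qed_rxq_start_ret_params ret;
 *
 *	rc = qed_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
 *				    rx_buf_size, bd_ring_dma,
 *				    cqe_pbl_dma, cqe_pbl_pages, &ret);
 *	// ret.p_prod   - producer address in the internal RAM
 *	// ret.p_handle - pass to qed_eth_rx_queue_stop() on teardown
 *
 * where rx_buf_size, bd_ring_dma, cqe_pbl_dma and cqe_pbl_pages are
 * hypothetical names for values the eth driver derives from its rings.
 */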

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
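
/* Illustrative sketch (assumption, not part of this file): the tx analog
 * of the rx flow above returns a doorbell address instead of a producer
 * pointer -
 *
 *	struct qed_txq_start_ret_params ret;
 *
 *	rc = qed_eth_tx_queue_start(p_hwfn, opaque_fid, &params,
 *				    tc, pbl_dma, pbl_pages, &ret);
 *	// ret.p_doorbell - write here to ring the tx doorbell
 *	// ret.p_handle   - pass to qed_eth_tx_queue_stop() on teardown
 *
 * pbl_dma and pbl_pages are hypothetical names for the tx PBL the caller
 * allocated; tc selects the QM physical queue via
 * qed_get_cm_pq_idx_mcos().
 */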

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
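
/* Byte-layout example (derived from the swizzling above): for the MAC
 * aa:bb:cc:dd:ee:ff, each pair of MAC bytes is stored swapped, so the
 * little-endian 16-bit words read back as the natural big-endian pairs -
 *
 *	memory: fw_msb[] = { mac[1], mac[0] } -> le16 value 0xaabb
 *	        fw_mid[] = { mac[3], mac[2] } -> le16 value 0xccdd
 *	        fw_lsb[] = { mac[5], mac[4] } -> le16 value 0xeeff
 */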

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer.
 *	Note: crc32_length MUST be aligned to 8.
 * Return:
 *	The computed CRC value, or the seed unchanged on invalid input.
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /* crc32_result[0] = 1 */
			}
		}
	}
	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
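
/* Worked example (illustrative): the approximate-multicast filter is a
 * 256-bin hash vector; each multicast MAC maps to one bin by taking the
 * low byte of its CRC32C -
 *
 *	u8 mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u8 bin = qed_mcast_bin_from_mac(mcast);	// 0..255
 *	__set_bit(bin, bins);			// mark it accepted
 *
 * A set bin accepts every MAC that hashes to it, so matching is
 * approximate by design; see qed_sp_eth_filter_mcast() below.
 */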

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* The filter ADD op is an explicit set op; it removes any
	 * existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}
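
/* Orientation note (derived from the helpers below, for reference): each
 * storm RAM keeps its own slice of the vport statistics -
 *
 *	PSTORM - tx unicast/multicast/broadcast counters
 *	TSTORM - per-port discards (MAC/MF-tag filter drops)
 *	USTORM - rx unicast/multicast/broadcast counters
 *	MSTORM - rx discards and TPA coalescing counters
 *
 * PFs read the counters straight out of BAR0 storm RAM; VFs use the
 * address/length pairs the PF published in the acquire response.
 */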

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
1689
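/* Port (MAC) statistics are maintained by the management FW rather than
 * the storms; they are read from the 'stats' member of the public_port
 * section in MCP shmem.
 */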
1690static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1691 struct qed_ptt *p_ptt,
1692 struct qed_eth_stats *p_stats)
1693{
9c79ddaa 1694 struct qed_eth_stats_common *p_common = &p_stats->common;
86622ee7
YM
1695 struct port_stats port_stats;
1696 int j;
1697
1698 memset(&port_stats, 0, sizeof(port_stats));
1699
1700 qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1701 p_hwfn->mcp_info->port_addr +
1702 offsetof(struct public_port, stats),
1703 sizeof(port_stats));
1704
9c79ddaa
MY
1705 p_common->rx_64_byte_packets += port_stats.eth.r64;
1706 p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1707 p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1708 p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1709 p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1710 p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1711 p_common->rx_crc_errors += port_stats.eth.rfcs;
1712 p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1713 p_common->rx_pause_frames += port_stats.eth.rxpf;
1714 p_common->rx_pfc_frames += port_stats.eth.rxpp;
1715 p_common->rx_align_errors += port_stats.eth.raln;
1716 p_common->rx_carrier_errors += port_stats.eth.rfcr;
1717 p_common->rx_oversize_packets += port_stats.eth.rovr;
1718 p_common->rx_jabbers += port_stats.eth.rjbr;
1719 p_common->rx_undersize_packets += port_stats.eth.rund;
1720 p_common->rx_fragments += port_stats.eth.rfrg;
1721 p_common->tx_64_byte_packets += port_stats.eth.t64;
1722 p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1723 p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1724 p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1725 p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1726 p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1727 p_common->tx_pause_frames += port_stats.eth.txpf;
1728 p_common->tx_pfc_frames += port_stats.eth.txpp;
1729 p_common->rx_mac_bytes += port_stats.eth.rbyte;
1730 p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1731 p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1732 p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1733 p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1734 p_common->tx_mac_bytes += port_stats.eth.tbyte;
1735 p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1736 p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1737 p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1738 p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
86622ee7 1739 for (j = 0; j < 8; j++) {
9c79ddaa
MY
1740 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1741 p_common->brb_discards += port_stats.brb.brb_discard[j];
1742 }
1743
1744 if (QED_IS_BB(p_hwfn->cdev)) {
1745 struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1746
1747 p_bb->rx_1519_to_1522_byte_packets +=
1748 port_stats.eth.u0.bb0.r1522;
1749 p_bb->rx_1519_to_2047_byte_packets +=
1750 port_stats.eth.u0.bb0.r2047;
1751 p_bb->rx_2048_to_4095_byte_packets +=
1752 port_stats.eth.u0.bb0.r4095;
1753 p_bb->rx_4096_to_9216_byte_packets +=
1754 port_stats.eth.u0.bb0.r9216;
1755 p_bb->rx_9217_to_16383_byte_packets +=
1756 port_stats.eth.u0.bb0.r16383;
1757 p_bb->tx_1519_to_2047_byte_packets +=
1758 port_stats.eth.u1.bb1.t2047;
1759 p_bb->tx_2048_to_4095_byte_packets +=
1760 port_stats.eth.u1.bb1.t4095;
1761 p_bb->tx_4096_to_9216_byte_packets +=
1762 port_stats.eth.u1.bb1.t9216;
1763 p_bb->tx_9217_to_16383_byte_packets +=
1764 port_stats.eth.u1.bb1.t16383;
1765 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1766 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1767 } else {
1768 struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1769
1770 p_ah->rx_1519_to_max_byte_packets +=
1771 port_stats.eth.u0.ah0.r1519_to_max;
1772 p_ah->tx_1519_to_max_byte_packets +=
1773 port_stats.eth.u1.ah1.t1519_to_max;
86622ee7
YM
1774 }
1775}
1776
1777static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1778 struct qed_ptt *p_ptt,
1779 struct qed_eth_stats *stats,
dacd88d6 1780 u16 statistics_bin, bool b_get_port_stats)
86622ee7
YM
1781{
1782 __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1783 __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1784 __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1785 __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1786
dacd88d6 1787 if (b_get_port_stats && p_hwfn->mcp_info)
86622ee7
YM
1788 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1789}
1790
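/* Accumulate the per-storm queue statistics and, for PFs, the MAC port
 * statistics across every hw-function of the device. VFs have no PTT
 * windows, so they pass a NULL ptt and skip the port statistics.
 */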
1791static void _qed_get_vport_stats(struct qed_dev *cdev,
1792 struct qed_eth_stats *stats)
1793{
dacd88d6
YM
1794 u8 fw_vport = 0;
1795 int i;
86622ee7
YM
1796
1797 memset(stats, 0, sizeof(*stats));
1798
1799 for_each_hwfn(cdev, i) {
1800 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
dacd88d6
YM
1801 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1802 : NULL;
1803
1804 if (IS_PF(cdev)) {
1805 /* The main vport is at relative index 0 */
1806 if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1807 DP_ERR(p_hwfn, "No vport available!\n");
1808 goto out;
1809 }
86622ee7
YM
1810 }
1811
dacd88d6 1812 if (IS_PF(cdev) && !p_ptt) {
86622ee7
YM
1813 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1814 continue;
1815 }
1816
dacd88d6
YM
1817 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1818 IS_PF(cdev));
86622ee7 1819
dacd88d6
YM
1820out:
1821 if (IS_PF(cdev) && p_ptt)
1822 qed_ptt_release(p_hwfn, p_ptt);
86622ee7
YM
1823 }
1824}
1825
1a635e48 1826void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
86622ee7
YM
1827{
1828 u32 i;
1829
1830 if (!cdev) {
1831 memset(stats, 0, sizeof(*stats));
1832 return;
1833 }
1834
1835 _qed_get_vport_stats(cdev, stats);
1836
1837 if (!cdev->reset_stats)
1838 return;
1839
1840 /* Reduce the statistics baseline */
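	/* This treats qed_eth_stats as a flat array of u64 counters; it
	 * assumes the struct is composed purely of 64-bit counters.
	 */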
1841 for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1842 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1843}
1844
1845/* Zeroes the V-PORT-specific portion of stats (port stats remain untouched) */
1846void qed_reset_vport_stats(struct qed_dev *cdev)
1847{
1848 int i;
1849
1850 for_each_hwfn(cdev, i) {
1851 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1852 struct eth_mstorm_per_queue_stat mstats;
1853 struct eth_ustorm_per_queue_stat ustats;
1854 struct eth_pstorm_per_queue_stat pstats;
dacd88d6
YM
1855 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1856 : NULL;
86622ee7
YM
1857 u32 addr = 0, len = 0;
1858
dacd88d6 1859 if (IS_PF(cdev) && !p_ptt) {
86622ee7
YM
1860 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1861 continue;
1862 }
1863
1864 memset(&mstats, 0, sizeof(mstats));
1865 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1866 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1867
1868 memset(&ustats, 0, sizeof(ustats));
1869 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1870 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1871
1872 memset(&pstats, 0, sizeof(pstats));
1873 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1874 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1875
dacd88d6
YM
1876 if (IS_PF(cdev))
1877 qed_ptt_release(p_hwfn, p_ptt);
86622ee7
YM
1878 }
1879
1880 /* PORT statistics are not necessarily reset, so we need to
1881 * read and create a baseline for future statistics.
1882 */
1883 if (!cdev->reset_stats)
1884 DP_INFO(cdev, "Reset stats not allocated\n");
1885 else
1886 _qed_get_vport_stats(cdev, cdev->reset_stats);
1887}
1888
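/* Illustrative sketch (not part of the driver): a protocol driver that
 * wants "clear statistics" semantics without touching the port counters
 * can combine the two helpers above. Names outside this file are
 * assumptions.
 *
 *	void example_clear_stats(struct qed_dev *cdev)
 *	{
 *		struct qed_eth_stats stats;
 *
 *		qed_reset_vport_stats(cdev);	// zero V-PORT counters and
 *						// re-baseline port stats
 *		qed_get_vport_stats(cdev, &stats);	// now reads ~zero
 *	}
 */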
d51e4af5
CM
1889static void
1890qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1891 struct qed_arfs_config_params *p_cfg_params)
1892{
1893 if (p_cfg_params->arfs_enable) {
1894 qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1895 p_cfg_params->tcp, p_cfg_params->udp,
1896 p_cfg_params->ipv4, p_cfg_params->ipv6);
1897 DP_VERBOSE(p_hwfn, QED_MSG_SP,
1898 "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
1899 p_cfg_params->tcp ? "Enable" : "Disable",
1900 p_cfg_params->udp ? "Enable" : "Disable",
1901 p_cfg_params->ipv4 ? "Enable" : "Disable",
1902 p_cfg_params->ipv6 ? "Enable" : "Disable");
1903 } else {
1904 qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
1905 }
1906
1907 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode: %s\n",
1908 p_cfg_params->arfs_enable ? "Enable" : "Disable");
1909}
1910
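/* Add or remove a single aRFS (GFT) n-tuple filter by posting an
 * ETH_RAMROD_GFT_UPDATE_FILTER ramrod on the slow path. The packet
 * header describing the flow is passed by DMA address/length, and
 * completion is reported through the optional callback.
 */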
1911static int
1912qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1913 struct qed_spq_comp_cb *p_cb,
1914 dma_addr_t p_addr, u16 length, u16 qid,
1915 u8 vport_id, bool b_is_add)
1916{
1917 struct rx_update_gft_filter_data *p_ramrod = NULL;
1918 struct qed_spq_entry *p_ent = NULL;
1919 struct qed_sp_init_data init_data;
1920 u16 abs_rx_q_id = 0;
1921 u8 abs_vport_id = 0;
1922 int rc = -EINVAL;
1923
1924 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
1925 if (rc)
1926 return rc;
1927
1928 rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
1929 if (rc)
1930 return rc;
1931
1932 /* Get SPQ entry */
1933 memset(&init_data, 0, sizeof(init_data));
1934 init_data.cid = qed_spq_get_cid(p_hwfn);
1935
1936 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1937
1938 if (p_cb) {
1939 init_data.comp_mode = QED_SPQ_MODE_CB;
1940 init_data.p_comp_data = p_cb;
1941 } else {
1942 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1943 }
1944
1945 rc = qed_sp_init_request(p_hwfn, &p_ent,
1946 ETH_RAMROD_GFT_UPDATE_FILTER,
1947 PROTOCOLID_ETH, &init_data);
1948 if (rc)
1949 return rc;
1950
1951 p_ramrod = &p_ent->ramrod.rx_update_gft;
1952 DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
1953 p_ramrod->pkt_hdr_length = cpu_to_le16(length);
1954 p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
1955 p_ramrod->vport_id = abs_vport_id;
1956 p_ramrod->filter_type = RFS_FILTER_TYPE;
1957 p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
1958
1959 DP_VERBOSE(p_hwfn, QED_MSG_SP,
1960 "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
1961 abs_vport_id, abs_rx_q_id,
1962 b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
1963
1964 return qed_spq_post(p_hwfn, p_ent, NULL);
1965}
1966
25c089d7
YM
1967static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1968 struct qed_dev_eth_info *info)
1969{
1970 int i;
1971
1972 memset(info, 0, sizeof(*info));
1973
1974 info->num_tc = 1;
1975
1408cc1f 1976 if (IS_PF(cdev)) {
25eb8d46 1977 int max_vf_vlan_filters = 0;
7b7e70f9 1978 int max_vf_mac_filters = 0;
25eb8d46 1979
1408cc1f 1980 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
e1d32acb
MY
1981 u16 num_queues = 0;
1982
1983 /* Since the feature controls only queue-zones,
1984 * make sure we have the contexts [rx, tx, xdp] to
1985 * match.
1986 */
1987 for_each_hwfn(cdev, i) {
1988 struct qed_hwfn *hwfn = &cdev->hwfns[i];
1989 u16 l2_queues = (u16)FEAT_NUM(hwfn,
1990 QED_PF_L2_QUE);
1991 u16 cids;
1992
1993 cids = hwfn->pf_params.eth_pf_params.num_cons;
1994 num_queues += min_t(u16, l2_queues, cids / 3);
1995 }
1996
1997 /* queues might theoretically be >256, but the interrupts'
1998 * upper limit guarantees that it would fit in a u8.
1999 */
2000 if (cdev->int_params.fp_msix_cnt) {
2001 u8 irqs = cdev->int_params.fp_msix_cnt;
2002
2003 info->num_queues = (u8)min_t(u16,
2004 num_queues, irqs);
2005 }
1408cc1f
YM
2006 } else {
2007 info->num_queues = cdev->num_hwfns;
2008 }
2009
7b7e70f9 2010 if (IS_QED_SRIOV(cdev)) {
25eb8d46
YM
2011 max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2012 QED_ETH_VF_NUM_VLAN_FILTERS;
7b7e70f9
YM
2013 max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2014 QED_ETH_VF_NUM_MAC_FILTERS;
2015 }
2016 info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2017 QED_VLAN) -
25eb8d46 2018 max_vf_vlan_filters;
7b7e70f9
YM
2019 info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2020 QED_MAC) -
2021 max_vf_mac_filters;
25eb8d46 2022
1408cc1f
YM
2023 ether_addr_copy(info->port_mac,
2024 cdev->hwfns[0].hw_info.hw_mac_addr);
25c089d7 2025 } else {
1408cc1f
YM
2026 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
2027 if (cdev->num_hwfns > 1) {
2028 u8 queues = 0;
25c089d7 2029
1408cc1f
YM
2030 qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
2031 info->num_queues += queues;
2032 }
2033
2034 qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2edbff8d 2035 (u8 *)&info->num_vlan_filters);
b0fca312
MY
2036 qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2037 (u8 *)&info->num_mac_filters);
1408cc1f 2038 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
d8c2c7e3
YM
2039
2040 info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
1408cc1f 2041 }
25c089d7
YM
2042
2043 qed_fill_dev_info(cdev, &info->common);
2044
1408cc1f 2045 if (IS_VF(cdev))
0ee28e31 2046 eth_zero_addr(info->common.hw_mac);
1408cc1f 2047
25c089d7
YM
2048 return 0;
2049}
2050
cc875c2e 2051static void qed_register_eth_ops(struct qed_dev *cdev,
1408cc1f 2052 struct qed_eth_cb_ops *ops, void *cookie)
cc875c2e 2053{
1408cc1f
YM
2054 cdev->protocol_ops.eth = ops;
2055 cdev->ops_cookie = cookie;
2056
2057 /* For VF, we start bulletin reading */
2058 if (IS_VF(cdev))
2059 qed_vf_start_iov_wq(cdev);
cc875c2e
YM
2060}
2061
eff16960
YM
2062static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2063{
2064 if (IS_PF(cdev))
2065 return true;
2066
2067 return qed_vf_check_mac(&cdev->hwfns[0], mac);
2068}
2069
cee4d264 2070static int qed_start_vport(struct qed_dev *cdev,
088c8618 2071 struct qed_start_vport_params *params)
cee4d264
MC
2072{
2073 int rc, i;
2074
2075 for_each_hwfn(cdev, i) {
088c8618 2076 struct qed_sp_vport_start_params start = { 0 };
cee4d264
MC
2077 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2078
088c8618
MC
2079 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2080 QED_TPA_MODE_NONE;
2081 start.remove_inner_vlan = params->remove_inner_vlan;
08feecd7 2082 start.only_untagged = true; /* untagged only */
088c8618
MC
2083 start.drop_ttl0 = params->drop_ttl0;
2084 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2085 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
c78c70fa 2086 start.handle_ptp_pkts = params->handle_ptp_pkts;
088c8618
MC
2087 start.vport_id = params->vport_id;
2088 start.max_buffers_per_cqe = 16;
2089 start.mtu = params->mtu;
2090
2091 rc = qed_sp_vport_start(p_hwfn, &start);
cee4d264
MC
2092 if (rc) {
2093 DP_ERR(cdev, "Failed to start VPORT\n");
2094 return rc;
2095 }
2096
15582962
RV
2097 rc = qed_hw_start_fastpath(p_hwfn);
2098 if (rc) {
2099 DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2100 return rc;
2101 }
cee4d264
MC
2102
2103 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2104 "Started V-PORT %d with MTU %d\n",
088c8618 2105 start.vport_id, start.mtu);
cee4d264
MC
2106 }
2107
a0d26d5a
YM
2108 if (params->clear_stats)
2109 qed_reset_vport_stats(cdev);
9df2ed04 2110
cee4d264
MC
2111 return 0;
2112}
2113
1a635e48 2114static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
cee4d264
MC
2115{
2116 int rc, i;
2117
2118 for_each_hwfn(cdev, i) {
2119 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2120
2121 rc = qed_sp_vport_stop(p_hwfn,
1a635e48 2122 p_hwfn->hw_info.opaque_fid, vport_id);
cee4d264
MC
2123
2124 if (rc) {
2125 DP_ERR(cdev, "Failed to stop VPORT\n");
2126 return rc;
2127 }
2128 }
2129 return 0;
2130}
2131
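/* On CMT (100G) devices the single logical RSS configuration must be
 * split between the two engines: indirection-table entries are assigned
 * according to the hw-function owning each queue-cid, so e.g. entries
 * 0, 2, 4, ... typically land on engine 0 and entries 1, 3, 5, ... on
 * engine 1, each engine then using a half-sized (2^6) table.
 */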
f29ffdb6
MY
2132static int qed_update_vport_rss(struct qed_dev *cdev,
2133 struct qed_update_vport_rss_params *input,
2134 struct qed_rss_params *rss)
2135{
2136 int i, fn;
2137
2138 /* Update configuration with what's correct regardless of CMT */
2139 rss->update_rss_config = 1;
2140 rss->rss_enable = 1;
2141 rss->update_rss_capabilities = 1;
2142 rss->update_rss_ind_table = 1;
2143 rss->update_rss_key = 1;
2144 rss->rss_caps = input->rss_caps;
2145 memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2146
2147 /* In a regular scenario, we'd simply take the input handlers.
2148 * But in CMT, we have to split the handlers according to the
2149 * engine they were configured on, and then determine whether
2150 * RSS is really required, since two queues on CMT don't
2151 * require RSS.
2152 */
2153 if (cdev->num_hwfns == 1) {
2154 memcpy(rss->rss_ind_table,
2155 input->rss_ind_table,
2156 QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2157 rss->rss_table_size_log = 7;
2158 return 0;
2159 }
2160
2161 /* Start by copying the non-specific information to the 2nd copy */
2162 memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2163
2164 /* CMT should be round-robin */
2165 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2166 struct qed_queue_cid *cid = input->rss_ind_table[i];
2167 struct qed_rss_params *t_rss;
2168
2169 if (cid->p_owner == QED_LEADING_HWFN(cdev))
2170 t_rss = &rss[0];
2171 else
2172 t_rss = &rss[1];
2173
2174 t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2175 }
2176
2177 /* Make sure RSS is actually required */
2178 for_each_hwfn(cdev, fn) {
2179 for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2180 if (rss[fn].rss_ind_table[i] !=
2181 rss[fn].rss_ind_table[0])
2182 break;
2183 }
2184 if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2185 DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2186 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2187 return -EINVAL;
2188 }
2189 rss[fn].rss_table_size_log = 6;
2190 }
2191
2192 return 0;
2193}
2194
cee4d264
MC
2195static int qed_update_vport(struct qed_dev *cdev,
2196 struct qed_update_vport_params *params)
2197{
2198 struct qed_sp_vport_update_params sp_params;
f29ffdb6
MY
2199 struct qed_rss_params *rss;
2200 int rc = 0, i;
cee4d264
MC
2201
2202 if (!cdev)
2203 return -ENODEV;
2204
f29ffdb6
MY
2205 rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
2206 if (!rss)
2207 return -ENOMEM;
2208
cee4d264 2209 memset(&sp_params, 0, sizeof(sp_params));
cee4d264
MC
2210
2211 /* Translate protocol params into sp params */
2212 sp_params.vport_id = params->vport_id;
1a635e48
YM
2213 sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2214 sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
cee4d264
MC
2215 sp_params.vport_active_rx_flg = params->vport_active_flg;
2216 sp_params.vport_active_tx_flg = params->vport_active_flg;
831bfb0e
YM
2217 sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2218 sp_params.tx_switching_flg = params->tx_switching_flg;
3f9b4a69
YM
2219 sp_params.accept_any_vlan = params->accept_any_vlan;
2220 sp_params.update_accept_any_vlan_flg =
2221 params->update_accept_any_vlan_flg;
cee4d264 2222
f29ffdb6
MY
2223 /* Prepare the RSS configuration */
2224 if (params->update_rss_flg)
2225 if (qed_update_vport_rss(cdev, &params->rss_params, rss))
cee4d264 2226 params->update_rss_flg = 0;
cee4d264
MC
2227
2228 for_each_hwfn(cdev, i) {
2229 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2230
f29ffdb6
MY
2231 if (params->update_rss_flg)
2232 sp_params.rss_params = &rss[i];
2233
cee4d264
MC
2234 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2235 rc = qed_sp_vport_update(p_hwfn, &sp_params,
2236 QED_SPQ_MODE_EBLOCK,
2237 NULL);
2238 if (rc) {
2239 DP_ERR(cdev, "Failed to update VPORT\n");
f29ffdb6 2240 goto out;
cee4d264
MC
2241 }
2242
2243 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2244 "Updated V-PORT %d: active_flag %d [update %d]\n",
2245 params->vport_id, params->vport_active_flg,
2246 params->update_vport_active_flg);
2247 }
2248
f29ffdb6
MY
2249out:
2250 vfree(rss);
2251 return rc;
cee4d264
MC
2252}
2253
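/* Queue handlers arrive with device-global ids; on CMT devices they are
 * spread across engines by taking rss_num modulo the number of
 * hw-functions and dividing the queue-id accordingly, so each engine
 * sees a dense, engine-local queue range.
 */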
2254static int qed_start_rxq(struct qed_dev *cdev,
3da7a37a
MY
2255 u8 rss_num,
2256 struct qed_queue_start_common_params *p_params,
cee4d264
MC
2257 u16 bd_max_bytes,
2258 dma_addr_t bd_chain_phys_addr,
2259 dma_addr_t cqe_pbl_addr,
2260 u16 cqe_pbl_size,
3da7a37a 2261 struct qed_rxq_start_ret_params *ret_params)
cee4d264 2262{
cee4d264 2263 struct qed_hwfn *p_hwfn;
1a635e48 2264 int rc, hwfn_index;
cee4d264 2265
3da7a37a 2266 hwfn_index = rss_num % cdev->num_hwfns;
cee4d264
MC
2267 p_hwfn = &cdev->hwfns[hwfn_index];
2268
3da7a37a
MY
2269 p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2270 p_params->stats_id = p_params->vport_id;
cee4d264 2271
3da7a37a
MY
2272 rc = qed_eth_rx_queue_start(p_hwfn,
2273 p_hwfn->hw_info.opaque_fid,
2274 p_params,
2275 bd_max_bytes,
2276 bd_chain_phys_addr,
2277 cqe_pbl_addr, cqe_pbl_size, ret_params);
cee4d264 2278 if (rc) {
3da7a37a 2279 DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
cee4d264
MC
2280 return rc;
2281 }
2282
2283 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
f604b17d 2284 "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
3da7a37a 2285 p_params->queue_id, rss_num, p_params->vport_id,
f604b17d 2286 p_params->p_sb->igu_sb_id);
cee4d264
MC
2287
2288 return 0;
2289}
2290
3da7a37a 2291static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
cee4d264
MC
2292{
2293 int rc, hwfn_index;
2294 struct qed_hwfn *p_hwfn;
2295
3da7a37a
MY
2296 hwfn_index = rss_id % cdev->num_hwfns;
2297 p_hwfn = &cdev->hwfns[hwfn_index];
cee4d264 2298
3da7a37a 2299 rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
cee4d264 2300 if (rc) {
3da7a37a 2301 DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
cee4d264
MC
2302 return rc;
2303 }
2304
2305 return 0;
2306}
2307
2308static int qed_start_txq(struct qed_dev *cdev,
3da7a37a 2309 u8 rss_num,
cee4d264
MC
2310 struct qed_queue_start_common_params *p_params,
2311 dma_addr_t pbl_addr,
2312 u16 pbl_size,
3da7a37a 2313 struct qed_txq_start_ret_params *ret_params)
cee4d264
MC
2314{
2315 struct qed_hwfn *p_hwfn;
2316 int rc, hwfn_index;
2317
3da7a37a
MY
2318 hwfn_index = rss_num % cdev->num_hwfns;
2319 p_hwfn = &cdev->hwfns[hwfn_index];
2320 p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2321 p_params->stats_id = p_params->vport_id;
cee4d264 2322
3da7a37a
MY
2323 rc = qed_eth_tx_queue_start(p_hwfn,
2324 p_hwfn->hw_info.opaque_fid,
2325 p_params, 0,
2326 pbl_addr, pbl_size, ret_params);
cee4d264
MC
2327
2328 if (rc) {
2329 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2330 return rc;
2331 }
2332
2333 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
f604b17d 2334 "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
3da7a37a 2335 p_params->queue_id, rss_num, p_params->vport_id,
f604b17d 2336 p_params->p_sb->igu_sb_id);
cee4d264
MC
2337
2338 return 0;
2339}
2340
2341#define QED_HW_STOP_RETRY_LIMIT (10)
2342static int qed_fastpath_stop(struct qed_dev *cdev)
2343{
15582962
RV
2344 int rc;
2345
2346 rc = qed_hw_stop_fastpath(cdev);
2347 if (rc) {
2348 DP_ERR(cdev, "Failed to stop Fastpath\n");
2349 return rc;
2350 }
cee4d264
MC
2351
2352 return 0;
2353}
2354
3da7a37a 2355static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
cee4d264
MC
2356{
2357 struct qed_hwfn *p_hwfn;
2358 int rc, hwfn_index;
2359
3da7a37a
MY
2360 hwfn_index = rss_id % cdev->num_hwfns;
2361 p_hwfn = &cdev->hwfns[hwfn_index];
cee4d264 2362
3da7a37a 2363 rc = qed_eth_tx_queue_stop(p_hwfn, handle);
cee4d264 2364 if (rc) {
3da7a37a 2365 DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
cee4d264
MC
2366 return rc;
2367 }
2368
2369 return 0;
2370}
2371
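/* Propagate UDP tunnel (VXLAN/GENEVE) port updates to the FW via a PF
 * update ramrod on every hw-function, and mirror the new ports to all
 * child VFs through their bulletin boards.
 */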
464f6645
MC
2372static int qed_tunn_configure(struct qed_dev *cdev,
2373 struct qed_tunn_params *tunn_params)
2374{
19968430 2375 struct qed_tunnel_info tunn_info;
464f6645
MC
2376 int i, rc;
2377
2378 memset(&tunn_info, 0, sizeof(tunn_info));
19968430
CM
2379 if (tunn_params->update_vxlan_port) {
2380 tunn_info.vxlan_port.b_update_port = true;
2381 tunn_info.vxlan_port.port = tunn_params->vxlan_port;
464f6645
MC
2382 }
2383
19968430
CM
2384 if (tunn_params->update_geneve_port) {
2385 tunn_info.geneve_port.b_update_port = true;
2386 tunn_info.geneve_port.port = tunn_params->geneve_port;
464f6645
MC
2387 }
2388
2389 for_each_hwfn(cdev, i) {
2390 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4f64675f 2391 struct qed_ptt *p_ptt;
97379f15
CM
2392 struct qed_tunnel_info *tun;
2393
2394 tun = &hwfn->cdev->tunnel;
4f64675f
MC
2395 if (IS_PF(cdev)) {
2396 p_ptt = qed_ptt_acquire(hwfn);
2397 if (!p_ptt)
2398 return -EAGAIN;
2399 } else {
2400 p_ptt = NULL;
2401 }
464f6645 2402
4f64675f 2403 rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
464f6645 2404 QED_SPQ_MODE_EBLOCK, NULL);
4f64675f
MC
2405 if (rc) {
2406 if (IS_PF(cdev))
2407 qed_ptt_release(hwfn, p_ptt);
464f6645 2408 return rc;
4f64675f 2409 }
97379f15
CM
2410
2411 if (IS_PF_SRIOV(hwfn)) {
2412 u16 vxlan_port, geneve_port;
2413 int j;
2414
2415 vxlan_port = tun->vxlan_port.port;
2416 geneve_port = tun->geneve_port.port;
2417
2418 qed_for_each_vf(hwfn, j) {
2419 qed_iov_bulletin_set_udp_ports(hwfn, j,
2420 vxlan_port,
2421 geneve_port);
2422 }
2423
2424 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2425 }
4f64675f
MC
2426 if (IS_PF(cdev))
2427 qed_ptt_release(hwfn, p_ptt);
464f6645
MC
2428 }
2429
2430 return 0;
2431}
2432
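/* Translate the protocol driver's rx-mode request into accept flags:
 * the baseline accepts matched unicast/multicast and broadcast, while
 * the promiscuous modes additionally accept unmatched traffic.
 */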
cee4d264
MC
2433static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2434 enum qed_filter_rx_mode_type type)
2435{
2436 struct qed_filter_accept_flags accept_flags;
2437
2438 memset(&accept_flags, 0, sizeof(accept_flags));
2439
1a635e48
YM
2440 accept_flags.update_rx_mode_config = 1;
2441 accept_flags.update_tx_mode_config = 1;
2442 accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2443 QED_ACCEPT_MCAST_MATCHED |
2444 QED_ACCEPT_BCAST;
cee4d264
MC
2445 accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2446 QED_ACCEPT_MCAST_MATCHED |
2447 QED_ACCEPT_BCAST;
2448
88067876 2449 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
cee4d264
MC
2450 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2451 QED_ACCEPT_MCAST_UNMATCHED;
88067876
MY
2452 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2453 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
cee4d264 2454 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
88067876
MY
2455 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2456 }
cee4d264 2457
3f9b4a69 2458 return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
cee4d264
MC
2459 QED_SPQ_MODE_CB, NULL);
2460}
2461
2462static int qed_configure_filter_ucast(struct qed_dev *cdev,
2463 struct qed_filter_ucast_params *params)
2464{
2465 struct qed_filter_ucast ucast;
2466
2467 if (!params->vlan_valid && !params->mac_valid) {
1a635e48
YM
2468 DP_NOTICE(cdev,
2469 "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
cee4d264
MC
2470 return -EINVAL;
2471 }
2472
2473 memset(&ucast, 0, sizeof(ucast));
2474 switch (params->type) {
2475 case QED_FILTER_XCAST_TYPE_ADD:
2476 ucast.opcode = QED_FILTER_ADD;
2477 break;
2478 case QED_FILTER_XCAST_TYPE_DEL:
2479 ucast.opcode = QED_FILTER_REMOVE;
2480 break;
2481 case QED_FILTER_XCAST_TYPE_REPLACE:
2482 ucast.opcode = QED_FILTER_REPLACE;
2483 break;
2484 default:
2485 DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2486 params->type);
return -EINVAL;
2487 }
2488
2489 if (params->vlan_valid && params->mac_valid) {
2490 ucast.type = QED_FILTER_MAC_VLAN;
2491 ether_addr_copy(ucast.mac, params->mac);
2492 ucast.vlan = params->vlan;
2493 } else if (params->mac_valid) {
2494 ucast.type = QED_FILTER_MAC;
2495 ether_addr_copy(ucast.mac, params->mac);
2496 } else {
2497 ucast.type = QED_FILTER_VLAN;
2498 ucast.vlan = params->vlan;
2499 }
2500
2501 ucast.is_rx_filter = true;
2502 ucast.is_tx_filter = true;
2503
2504 return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2505}
2506
2507static int qed_configure_filter_mcast(struct qed_dev *cdev,
2508 struct qed_filter_mcast_params *params)
2509{
2510 struct qed_filter_mcast mcast;
2511 int i;
2512
2513 memset(&mcast, 0, sizeof(mcast));
2514 switch (params->type) {
2515 case QED_FILTER_XCAST_TYPE_ADD:
2516 mcast.opcode = QED_FILTER_ADD;
2517 break;
2518 case QED_FILTER_XCAST_TYPE_DEL:
2519 mcast.opcode = QED_FILTER_REMOVE;
2520 break;
2521 default:
2522 DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2523 params->type);
return -EINVAL;
2524 }
2525
2526 mcast.num_mc_addrs = params->num;
2527 for (i = 0; i < mcast.num_mc_addrs; i++)
2528 ether_addr_copy(mcast.mac[i], params->mac[i]);
2529
1a635e48 2530 return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
cee4d264
MC
2531}
2532
2533static int qed_configure_filter(struct qed_dev *cdev,
2534 struct qed_filter_params *params)
2535{
2536 enum qed_filter_rx_mode_type accept_flags;
2537
2538 switch (params->type) {
2539 case QED_FILTER_TYPE_UCAST:
2540 return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2541 case QED_FILTER_TYPE_MCAST:
2542 return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2543 case QED_FILTER_TYPE_RX_MODE:
2544 accept_flags = params->filter.accept_flags;
2545 return qed_configure_filter_rx_mode(cdev, accept_flags);
2546 default:
1a635e48 2547 DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
cee4d264
MC
2548 return -EINVAL;
2549 }
2550}
2551
d51e4af5
CM
2552static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
2553{
2554 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2555 struct qed_arfs_config_params arfs_config_params;
2556
2557 memset(&arfs_config_params, 0, sizeof(arfs_config_params));
2558 arfs_config_params.tcp = true;
2559 arfs_config_params.udp = true;
2560 arfs_config_params.ipv4 = true;
2561 arfs_config_params.ipv6 = true;
2562 arfs_config_params.arfs_enable = en_searcher;
2563
2564 qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
2565 &arfs_config_params);
2566 return 0;
2567}
2568
2569static void
2570qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
2571 void *cookie, union event_ring_data *data,
2572 u8 fw_return_code)
2573{
2574 struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
2575 void *dev = p_hwfn->cdev->ops_cookie;
2576
2577 op->arfs_filter_op(dev, cookie, fw_return_code);
2578}
2579
2580static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
2581 dma_addr_t mapping, u16 length,
2582 u16 vport_id, u16 rx_queue_id,
2583 bool add_filter)
2584{
2585 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2586 struct qed_spq_comp_cb cb;
2587 int rc = -EINVAL;
2588
2589 cb.function = qed_arfs_sp_response_handler;
2590 cb.cookie = cookie;
2591
2592 rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
2593 &cb, mapping, length, rx_queue_id,
2594 vport_id, add_filter);
2595 if (rc)
2596 DP_NOTICE(p_hwfn,
2597 "Failed to issue a-RFS filter configuration\n");
2598 else
2599 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
2600 "Successfully issued a-RFS filter configuration\n");
2601
2602 return rc;
2603}
2604
cee4d264 2605static int qed_fp_cqe_completion(struct qed_dev *dev,
1a635e48 2606 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
cee4d264
MC
2607{
2608 return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2609 cqe);
2610}
2611
0b55e27d
YM
2612#ifdef CONFIG_QED_SRIOV
2613extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2614#endif
2615
a1d8d8a5
SRK
2616#ifdef CONFIG_DCB
2617extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
2618#endif
2619
c78c70fa
SRK
2620extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
2621
25c089d7
YM
2622static const struct qed_eth_ops qed_eth_ops_pass = {
2623 .common = &qed_common_ops_pass,
0b55e27d
YM
2624#ifdef CONFIG_QED_SRIOV
2625 .iov = &qed_iov_ops_pass,
a1d8d8a5
SRK
2626#endif
2627#ifdef CONFIG_DCB
2628 .dcb = &qed_dcbnl_ops_pass,
0b55e27d 2629#endif
c78c70fa 2630 .ptp = &qed_ptp_ops_pass,
25c089d7 2631 .fill_dev_info = &qed_fill_eth_dev_info,
cc875c2e 2632 .register_ops = &qed_register_eth_ops,
eff16960 2633 .check_mac = &qed_check_mac,
cee4d264
MC
2634 .vport_start = &qed_start_vport,
2635 .vport_stop = &qed_stop_vport,
2636 .vport_update = &qed_update_vport,
2637 .q_rx_start = &qed_start_rxq,
2638 .q_rx_stop = &qed_stop_rxq,
2639 .q_tx_start = &qed_start_txq,
2640 .q_tx_stop = &qed_stop_txq,
2641 .filter_config = &qed_configure_filter,
2642 .fastpath_stop = &qed_fastpath_stop,
2643 .eth_cqe_completion = &qed_fp_cqe_completion,
9df2ed04 2644 .get_vport_stats = &qed_get_vport_stats,
464f6645 2645 .tunn_config = &qed_tunn_configure,
d51e4af5
CM
2646 .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
2647 .configure_arfs_searcher = &qed_configure_arfs_searcher,
25c089d7
YM
2648};
2649
95114344 2650const struct qed_eth_ops *qed_get_eth_ops(void)
25c089d7 2651{
25c089d7
YM
2652 return &qed_eth_ops_pass;
2653}
2654EXPORT_SYMBOL(qed_get_eth_ops);
2655
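/* Illustrative sketch (not part of the driver): a consumer such as the
 * qede module obtains the ops table at probe time; the exact qede flow
 * shown here is an assumption.
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *
 *	if (ops)
 *		ops->register_ops(cdev, &my_cb_ops, my_cookie);
 */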
2656void qed_put_eth_ops(void)
2657{
2658 /* TODO - reference count for module? */
2659}
2660EXPORT_SYMBOL(qed_put_eth_ops);