/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

struct qed_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;
	u8 rss_table_size_log;
	u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32 rss_key[QED_RSS_KEY_SIZE];
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode opcode;
	enum qed_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS	64
	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

struct qed_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define QED_ACCEPT_NONE			0x01
#define QED_ACCEPT_UCAST_MATCHED	0x02
#define QED_ACCEPT_UCAST_UNMATCHED	0x04
#define QED_ACCEPT_MCAST_MATCHED	0x08
#define QED_ACCEPT_MCAST_UNMATCHED	0x10
#define QED_ACCEPT_BCAST		0x20
};

struct qed_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_approx_mcast_flg;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	unsigned long bins[8];
	struct qed_rss_params *rss_params;
	struct qed_filter_accept_flags accept_flags;
};

enum qed_tpa_mode {
	QED_TPA_MODE_NONE,
	QED_TPA_MODE_UNUSED,
	QED_TPA_MODE_GRO,
	QED_TPA_MODE_MAX
};

struct qed_sp_vport_start_params {
	enum qed_tpa_mode tpa_mode;
	bool remove_inner_vlan;
	bool drop_ttl0;
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;
	u16 mtu;
};

#define QED_MAX_SGES_NUM	16
#define CRC32_POLY		0x1edc6f41
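
/* Post a VPORT_START ramrod on the slow-path queue, configuring MTU,
 * inner-VLAN removal, TTL0 drop and (for GRO mode) TPA aggregation for
 * the new vport. Rx mode starts with all unicast and multicast traffic
 * dropped, until a later filter/accept-mode configuration changes it.
 */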
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u16 rx_mode = 0;
	u8 abs_vport_id = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0,
	       sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
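
/* Translate the driver's RSS parameters into the eth_vport_rss_config
 * section of a vport-update ramrod. Indirection-table entries arrive as
 * relative queue IDs and are converted to absolute L2 queues; a NULL
 * p_params simply clears the update_rss flag.
 */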
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}
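
/* Translate QED_ACCEPT_* filter flags into the firmware's Rx/Tx vport
 * state bit-fields. Note the drop-all bits are the logical inverse of
 * the matched/unmatched accept flags, while the accept-all bits require
 * both the matched and unmatched flags to be set.
 */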
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
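
/* Copy the approximate-multicast bin bitmap into the vport-update
 * ramrod, converting each 32-bit register's worth to little-endian.
 */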
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (p_params->update_approx_mcast_flg) {
		p_ramrod->common.update_approx_mcast_flg = 1;
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)p_params->bins;
			__le32 val = cpu_to_le32(p_bins[i]);

			p_ramrod->approx_mcast.bins[i] = val;
		}
	}
}
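
/* Build and post a VPORT_UPDATE ramrod from the aggregate update
 * parameters: active Rx/Tx state, accept-any-vlan, RSS configuration,
 * multicast bins and accept-mode flags. On an RSS translation failure
 * the already-acquired SPQ entry is returned to the pool.
 */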
static int
qed_sp_vport_update(struct qed_hwfn *p_hwfn,
		    struct qed_sp_vport_update_params *p_params,
		    enum spq_mode comp_mode,
		    struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	p_cmn->update_accept_any_vlan_flg =
			p_params->update_accept_any_vlan_flg;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
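
/* Post a VPORT_STOP ramrod for the given relative vport ID. */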
static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
			     u16 opaque_fid,
			     u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
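
/* Apply an Rx/Tx accept-mode (and optionally accept-any-vlan) change to
 * the given vport on every hwfn of the device via vport-update ramrods.
 */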
static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc != 0) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

static int qed_sp_release_queue_cid(
	struct qed_hwfn *p_hwfn,
	struct qed_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return 0;

	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

	p_cid_data->b_cid_allocated = false;

	return 0;
}
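
/* Post an RX_QUEUE_START ramrod: record the queue's CID/vport for a
 * later stop, translate relative vport and queue IDs to absolute ones,
 * and hand the firmware the BD chain and CQE PBL addresses.
 */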
static int
qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *params,
			    u8 stats_id,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = params->vport_id;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
	if (rc != 0)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, params->queue_id, params->vport_id,
		   params->sb);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(params->sb);
	p_ramrod->sb_index = params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}
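
/* PF Rx-queue start: map the queue's producer into the internal RAM
 * (zeroing it first), acquire a CID for the queue, then post the
 * RX_QUEUE_START ramrod. On failure the CID is released again.
 */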
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u64 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
	if (rc != 0)
		return rc;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
	if (rc != 0)
		return rc;

	*pp_prod = (u8 __iomem *)p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size);

	if (rc != 0)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 rx_queue_id,
				    bool eq_completion_only,
				    bool cqe_completion)
{
	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg =
		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
		 !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg =
		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
		eq_completion_only;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
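
/* Post a TX_QUEUE_START ramrod: record the CID for a later stop,
 * resolve the absolute vport ID, pass the Tx PBL to firmware and
 * select the physical queue (PQ) via the QM parameters.
 */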
static int
qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *p_params,
			    u8 stats_id,
			    dma_addr_t pbl_addr,
			    u16 pbl_size,
			    union qed_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_tx_cid;
	u8 abs_vport_id;
	int rc = -EINVAL;
	u16 pq_id;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
	p_ramrod->sb_index = p_params->sb_idx;
	p_ramrod->stats_counter_id = stats_id;

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	pq_id = qed_get_qm_pq(p_hwfn,
			      PROTOCOLID_ETH,
			      p_pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell)
{
	struct qed_hw_cid_data *p_tx_cid;
	union qed_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
	memset(&pq_params, 0, sizeof(pq_params));

	/* Allocate a CID for the queue */
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_tx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid,
		   p_params->queue_id, p_params->vport_id, p_params->sb);

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_tx_cid->cid,
					 p_params,
					 abs_stats_id,
					 pbl_addr,
					 pbl_size,
					 &pq_params);

	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				     qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}

static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 tx_queue_id)
{
	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_tx_cid->cid;
	init_data.opaque_fid = p_tx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}

static enum eth_filter_action
qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}
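
/* Pack a 6-byte MAC address into the three 16-bit fields used by the
 * firmware filter commands, swapping the bytes of each pair:
 * msb = mac[1]:mac[0], mid = mac[3]:mac[2], lsb = mac[5]:mac[4].
 */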
static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
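
/* Prepare a FILTERS_UPDATE ramrod for a unicast filter command. MOVE
 * and REPLACE are encoded as two firmware filter commands (a remove
 * plus an add); all other opcodes map onto a single command. The SPQ
 * entry and ramrod are handed back so the caller can tweak the header
 * before posting.
 */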
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
				   u16 opaque_fid,
				   struct qed_filter_ucast *p_filter_cmd,
				   enum spq_mode comp_mode,
				   struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc != 0) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "Unicast filter ADD command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 *	crc32 result
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length,
			   u32 crc32_seed,
			   u8 complement)
{
	u32 byte = 0;
	u32 bit = 0;
	u8 msb = 0;
	u8 current_byte = 0;
	u32 crc32_result = crc32_seed;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static inline u32 qed_crc32c_le(u32 seed,
				u8 *mac,
				u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
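
/* Hash a multicast MAC into one of 256 approximate-multicast bins by
 * taking the low byte of its CRC32c, seeded with
 * ETH_MULTICAST_BIN_FROM_MAC_SEED.
 */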
static u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
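
/* Configure the approximate-multicast bin vector for a vport via a
 * VPORT_UPDATE ramrod. An ADD opcode is an explicit set: the bins for
 * all requested MACs are computed and installed, replacing whatever was
 * configured before; any other opcode leaves the vector cleared.
 */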
static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
		if (rc)
			return rc;
	} else {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
		if (rc)
			return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)bins;
			struct vport_update_ramrod_mcast *approx_mcast;

			approx_mcast = &p_ramrod->approx_mcast;
			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_filter_mcast_cmd(struct qed_dev *cdev,
		     struct qed_filter_mcast *p_filter_cmd,
		     enum spq_mode comp_mode,
		     struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len,
					   u16 statistics_bin)
{
	*p_addr = BAR0_MAP_REG_PSDM_RAM +
		  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
	*p_len = sizeof(struct eth_pstorm_per_queue_stat);
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats,
			pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes +=
		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes +=
		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes +=
		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts +=
		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts +=
		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts +=
		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts +=
		HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len)
{
	*p_addr = BAR0_MAP_REG_TSDM_RAM +
		  TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
	*p_len = sizeof(struct tstorm_per_port_stat);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	u32 tstats_addr = 0, tstats_len = 0;
	struct tstorm_per_port_stat tstats;

	__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats,
			tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len,
					   u16 statistics_bin)
{
	*p_addr = BAR0_MAP_REG_USDM_RAM +
		  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
	*p_len = sizeof(struct eth_ustorm_per_queue_stat);
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats,
			ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len,
					   u16 statistics_bin)
{
	*p_addr = BAR0_MAP_REG_MSDM_RAM +
		  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
	*p_len = sizeof(struct eth_mstorm_per_queue_stat);
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats,
			mstats_addr, mstats_len);

	p_stats->no_buff_discards +=
		HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard +=
		HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num +=
		HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.pmm.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
	p_stats->rx_crc_errors += port_stats.pmm.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
	p_stats->rx_pause_frames += port_stats.pmm.rxpf;
	p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
	p_stats->rx_align_errors += port_stats.pmm.raln;
	p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
	p_stats->rx_oversize_packets += port_stats.pmm.rovr;
	p_stats->rx_jabbers += port_stats.pmm.rjbr;
	p_stats->rx_undersize_packets += port_stats.pmm.rund;
	p_stats->rx_fragments += port_stats.pmm.rfrg;
	p_stats->tx_64_byte_packets += port_stats.pmm.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
	p_stats->tx_pause_frames += port_stats.pmm.txpf;
	p_stats->tx_pfc_frames += port_stats.pmm.txpp;
	p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
	p_stats->tx_total_collisions += port_stats.pmm.tncl;
	p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
	p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
	p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
	p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		/* The main vport index is relative first */
		if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
			DP_ERR(p_hwfn, "No vport available!\n");
			continue;
		}

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);

		qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev,
			 struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
		u32 addr = 0, len = 0;

		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}
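
/* Fill the qed_dev_eth_info handed to the protocol driver. For a PF the
 * queue count is derived from the interrupt configuration, and VLAN
 * filters and the port MAC come from hw_info; for a VF they are queried
 * through the qed_vf_get_* helpers.
 */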
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			for_each_hwfn(cdev, i)
				info->num_queues +=
					FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
			if (cdev->int_params.fp_msix_cnt)
				info->num_queues =
					min_t(u8, info->num_queues,
					      cdev->int_params.fp_msix_cnt);
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
	} else {
		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
		if (cdev->num_hwfns > 1) {
			u8 queues = 0;

			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    &info->num_vlan_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		memset(info->common.hw_mac, 0, ETH_ALEN);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
				QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev,
			  u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}
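
/* Protocol-driver facing vport update: translate the update parameters
 * into slow-path parameters and replicate the update on every hwfn.
 * For CMT (multi-hwfn) devices the RSS indirection table is rescaled
 * per engine, or RSS is disabled when only one queue per hwfn exists.
 */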
static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss =
			&params->rss_params;
		int k, max = 0;

		/* Find largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
				max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
				cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
					rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now, update the RSS configuration for actual configuration */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = params->rss_params.rss_caps;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
	}
	sp_params.rss_params = &sp_rss_params;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}
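
/* Start an Rx queue on behalf of the protocol driver. The owning hwfn
 * is selected as rss_id modulo the hwfn count, and the relative queue
 * ID is scaled down by the hwfn count for CMT/100g devices.
 */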
1787static int qed_start_rxq(struct qed_dev *cdev,
1788 struct qed_queue_start_common_params *params,
1789 u16 bd_max_bytes,
1790 dma_addr_t bd_chain_phys_addr,
1791 dma_addr_t cqe_pbl_addr,
1792 u16 cqe_pbl_size,
1793 void __iomem **pp_prod)
1794{
1795 int rc, hwfn_index;
1796 struct qed_hwfn *p_hwfn;
1797
1798 hwfn_index = params->rss_id % cdev->num_hwfns;
1799 p_hwfn = &cdev->hwfns[hwfn_index];
1800
1801 /* Fix queue ID in 100g mode */
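	/* e.g. (illustrative): with two engines, global queue 5 becomes
	 * per-engine queue 5 / 2 == 2 on the engine chosen by rss_id above.
	 */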
	params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_rx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       params,
				       bd_max_bytes,
				       bd_chain_phys_addr,
				       cqe_pbl_addr,
				       cqe_pbl_size,
				       pp_prod);

	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   params->queue_id, params->rss_id, params->vport_id,
		   params->sb);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev,
			struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
				      params->rx_queue_id / cdev->num_hwfns,
				      params->eq_completion_only,
				      false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 void __iomem **pp_doorbell)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = p_params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	p_params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_tx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       p_params,
				       pbl_addr,
				       pbl_size,
				       pp_doorbell);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev,
			struct qed_stop_txq_params *params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
				      params->tx_queue_id / cdev->num_hwfns);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunn_update_params tunn_info;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port == 1) {
		tunn_info.update_vxlan_udp_port = 1;
		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port == 1) {
		tunn_info.update_geneve_udp_port = 1;
		tunn_info.geneve_udp_port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);

		if (rc)
			return rc;
	}

	return 0;
}
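
/* Illustrative sketch (helper name is hypothetical): a caller updating
 * the VXLAN UDP port through the .tunn_config op, e.g. to the
 * IANA-assigned port 4789.
 */
static int example_set_vxlan_port(struct qed_dev *cdev,
				  const struct qed_eth_ops *ops)
{
	struct qed_tunn_params tparams;

	memset(&tparams, 0, sizeof(tparams));
	tparams.update_vxlan_port = 1;
	tparams.vxlan_port = 4789;

	return ops->tunn_config(cdev, &tparams);
}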

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

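	/* Base policy for both RX and TX: accept matched unicast, matched
	 * multicast and broadcast. The promiscuous modes below only widen
	 * this by also accepting the unmatched variants.
	 */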
	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but neither a MAC nor a VLAN was specified\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast,
				    QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n",
			  (int)params->type);
		return -EINVAL;
	}
}
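
/* Illustrative sketch (helper name is hypothetical): adding a unicast
 * MAC filter through the .filter_config op, which dispatches to
 * qed_configure_filter_ucast() above.
 */
static int example_add_mac_filter(struct qed_dev *cdev,
				  const struct qed_eth_ops *ops,
				  const u8 *mac)
{
	struct qed_filter_params fparams;

	memset(&fparams, 0, sizeof(fparams));
	fparams.type = QED_FILTER_TYPE_UCAST;
	fparams.filter.ucast.type = QED_FILTER_XCAST_TYPE_ADD;
	fparams.filter.ucast.mac_valid = true;
	ether_addr_copy(fparams.filter.ucast.mac, mac);

	return ops->filter_config(cdev, &fparams);
}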

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id,
				 struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);
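
/* Illustrative usage (hypothetical caller, function names are
 * assumptions): a protocol driver such as qede acquires the ops once at
 * module init and releases them on exit.
 */
static const struct qed_eth_ops *qed_ops;

static int __init example_module_init(void)
{
	qed_ops = qed_get_eth_ops();
	if (!qed_ops)
		return -EINVAL;

	return 0;
}

static void __exit example_module_exit(void)
{
	qed_put_eth_ops();
}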