/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

enum qed_rss_caps {
	QED_RSS_IPV4 = 0x1,
	QED_RSS_IPV6 = 0x2,
	QED_RSS_IPV4_TCP = 0x4,
	QED_RSS_IPV6_TCP = 0x8,
	QED_RSS_IPV4_UDP = 0x10,
	QED_RSS_IPV6_UDP = 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */

struct qed_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;
	u8 rss_table_size_log;
	u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32 rss_key[QED_RSS_KEY_SIZE];
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH, /* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode opcode;
	enum qed_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS	64
	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

struct qed_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define QED_ACCEPT_NONE			0x01
#define QED_ACCEPT_UCAST_MATCHED	0x02
#define QED_ACCEPT_UCAST_UNMATCHED	0x04
#define QED_ACCEPT_MCAST_MATCHED	0x08
#define QED_ACCEPT_MCAST_UNMATCHED	0x10
#define QED_ACCEPT_BCAST		0x20
};

struct qed_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_approx_mcast_flg;
	unsigned long bins[8];
	struct qed_rss_params *rss_params;
	struct qed_filter_accept_flags accept_flags;
};

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

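/* Post a VPORT_START ramrod. The relative vport_id is translated to its
 * absolute index before being handed to firmware. The vport comes up with
 * all unicast and multicast traffic dropped; a later vport-update is
 * expected to configure the actual accept flags.
 */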
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      u32 concrete_fid,
			      u16 opaque_fid,
			      u8 vport_id,
			      u16 mtu,
			      u8 drop_ttl0_flg,
			      u8 inner_vlan_removal_en_flg)
{
	struct qed_sp_init_request_params params;
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	int rc = -EINVAL;
	u16 rx_mode = 0;
	u8 abs_vport_id = 0;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&params, 0, sizeof(params));
	params.ramrod_data_size = sizeof(*p_ramrod);
	params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 opaque_fid,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH,
				 &params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(mtu);
	p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
	p_ramrod->drop_ttl0_en = drop_ttl0_flg;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0,
	       sizeof(struct eth_vport_tpa_param));

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

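/* Fill the RSS section of a vport-update ramrod from the supplied
 * parameters. A NULL p_params clears the update flag, leaving the current
 * RSS state untouched. Relative engine and queue indices are translated
 * to absolute ones on the way.
 */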
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i = %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < QED_RSS_KEY_SIZE; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}

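/* Translate QED_ACCEPT_* flags into the firmware's per-vport Rx/Tx mode
 * state. A "drop all" bit is set only when neither the matched nor the
 * unmatched variant of that traffic class is accepted.
 */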
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

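/* Copy the caller's approximate-multicast bin vector into the ramrod,
 * one 32-bit register at a time, converting to little endian.
 */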
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (p_params->update_approx_mcast_flg) {
		p_ramrod->common.update_approx_mcast_flg = 1;
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)p_params->bins;
			__le32 val = cpu_to_le32(p_bins[i]);

			p_ramrod->approx_mcast.bins[i] = val;
		}
	}
}

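/* Build and post a VPORT_UPDATE ramrod, folding activity flags, RSS,
 * multicast bins and accept-mode into a single slowpath request. If the
 * RSS translation fails, the spq entry acquired by qed_sp_init_request()
 * is handed back before returning.
 */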
static int
qed_sp_vport_update(struct qed_hwfn *p_hwfn,
		    struct qed_sp_vport_update_params *p_params,
		    enum spq_mode comp_mode,
		    struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_request_params sp_params;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = comp_mode;
	sp_params.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 p_params->opaque_fid,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return the spq entry taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs; the PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
			     u16 opaque_fid,
			     u8 vport_id)
{
	struct qed_sp_init_request_params sp_params;
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 opaque_fid,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

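/* Apply the given accept flags to a vport by posting a vport-update on
 * every hw-function of the device.
 */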
static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc != 0) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
	}

	return 0;
}

static int qed_sp_release_queue_cid(
	struct qed_hwfn *p_hwfn,
	struct qed_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return 0;

	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

	p_cid_data->b_cid_allocated = false;

	return 0;
}

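/* Post an RX_QUEUE_START ramrod for a CID the caller already acquired.
 * The queue parameters are cached in the per-queue CID data so the stop
 * flow can retrieve them later; DMA addresses are passed to firmware in
 * hi/lo little-endian form.
 */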
static int
qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *params,
			    u8 stats_id,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = params->vport_id;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
	if (rc != 0)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, params->queue_id, params->vport_id,
		   params->sb);

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
	sp_params.ramrod_data_size = sizeof(*p_ramrod);

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 cid, opaque_fid,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(params->sb);
	p_ramrod->sb_index = params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
	p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
	p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

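/* Higher-level Rx queue start: derive the producer address in MSTORM
 * internal RAM, zero the producers, acquire a CID and post the start
 * ramrod. The CID is released again if the ramrod fails.
 */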
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u64 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
	if (rc != 0)
		return rc;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
	if (rc != 0)
		return rc;

	*pp_prod = (u8 __iomem *)p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size);

	if (rc != 0)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 rx_queue_id,
				    bool eq_completion_only,
				    bool cqe_completion)
{
	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	u16 abs_rx_q_id = 0;
	int rc = -EINVAL;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 p_rx_cid->cid,
				 p_rx_cid->opaque_fid,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to arrive as an EQE on the PF.
	 */
	p_ramrod->complete_cqe_flg =
		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
		 !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg =
		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
		eq_completion_only;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}

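/* Post a TX_QUEUE_START ramrod. Besides the PBL and status-block fields,
 * the queue is bound to the QM physical queue that qed_get_qm_pq() picks
 * for the supplied PQ parameters (e.g., the traffic class).
 */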
static int
qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *p_params,
			    u8 stats_id,
			    dma_addr_t pbl_addr,
			    u16 pbl_size,
			    union qed_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_hw_cid_data *p_tx_cid;
	u8 abs_vport_id;
	int rc = -EINVAL;
	u16 pq_id;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
				 opaque_fid,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
	p_ramrod->sb_index = p_params->sb_idx;
	p_ramrod->stats_counter_id = stats_id;
	p_ramrod->tc = p_pq_params->eth.tc;

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
	p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);

	pq_id = qed_get_qm_pq(p_hwfn,
			      PROTOCOLID_ETH,
			      p_pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell)
{
	struct qed_hw_cid_data *p_tx_cid;
	union qed_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
	memset(&pq_params, 0, sizeof(pq_params));

	/* Allocate a CID for the queue */
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_tx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid,
		   p_params->queue_id, p_params->vport_id, p_params->sb);

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_tx_cid->cid,
					 p_params,
					 abs_stats_id,
					 pbl_addr,
					 pbl_size,
					 &pq_params);

	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
		       qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}

static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 tx_queue_id)
{
	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	int rc = -EINVAL;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 p_tx_cid->cid,
				 p_tx_cid->opaque_fid,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}

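/* Map a driver filter opcode onto the corresponding firmware action.
 * MOVE is expanded by the caller into an explicit REMOVE + ADD pair, so
 * it intentionally lands in the unsupported default case here.
 */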
static enum eth_filter_action
qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_REPLACE:
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REPLACE;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

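/* Firmware expects a MAC address as three 16-bit words with the two bytes
 * of each word swapped; e.g. mac[0] ends up in the high byte of the
 * little-endian MSB word.
 */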
static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

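/* Common preparation for unicast filter ramrods: acquire the spq entry,
 * fill the filter command header and the first (and, for MOVE, second)
 * filter command. Posting the ramrod is left to the caller.
 */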
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct qed_sp_init_request_params sp_params;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(**pp_ramrod);
	sp_params.comp_mode = comp_mode;
	sp_params.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 qed_spq_get_cid(p_hwfn),
				 opaque_fid,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_FLUSH:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
				   u16 opaque_fid,
				   struct qed_filter_ucast *p_filter_cmd,
				   enum spq_mode comp_mode,
				   struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc != 0) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "Unicast filter command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer.
 *	Note: crc32_length must be a non-zero multiple of 8 (bytes).
 * Return:
 *	The CRC32 result, or the unmodified seed if the input is invalid.
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length,
			   u32 crc32_seed,
			   u8 complement)
{
	u32 byte = 0;
	u32 bit = 0;
	u8 msb = 0;
	u8 current_byte = 0;
	u32 crc32_result = crc32_seed;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /* crc32_result[0] = 1 */
			}
		}
	}

	return crc32_result;
}

static inline u32 qed_crc32c_le(u32 seed,
				u8 *mac,
				u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

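/* Hash a multicast MAC into one of 256 approximate-filter bins, using the
 * CRC32c of the address with the firmware's fixed seed.
 */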
static u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
		if (rc)
			return rc;
	} else {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
		if (rc)
			return rc;
	}

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = comp_mode;
	sp_params.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 p_hwfn->hw_info.opaque_fid,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);

	/* The ADD filter op is an explicit set operation; it removes any
	 * existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)bins;
			struct vport_update_ramrod_mcast *approx_mcast;

			approx_mcast = &p_ramrod->approx_mcast;
			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_filter_mcast_cmd(struct qed_dev *cdev,
		     struct qed_filter_mcast *p_filter_cmd,
		     enum spq_mode comp_mode,
		     struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}

	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}

	return rc;
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i)
			info->num_queues += FEAT_NUM(&cdev->hwfns[i],
						     QED_PF_L2_QUE);
		if (cdev->int_params.fp_msix_cnt)
			info->num_queues = min_t(u8, info->num_queues,
						 cdev->int_params.fp_msix_cnt);
	} else {
		info->num_queues = cdev->num_hwfns;
	}

	info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
	ether_addr_copy(info->port_mac,
			cdev->hwfns[0].hw_info.hw_mac_addr);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_start_vport(struct qed_dev *cdev,
			   u8 vport_id,
			   u16 mtu,
			   u8 drop_ttl0_flg,
			   u8 inner_vlan_removal_en_flg)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_start(p_hwfn,
					p_hwfn->hw_info.concrete_fid,
					p_hwfn->hw_info.opaque_fid,
					vport_id,
					mtu,
					drop_ttl0_flg,
					inner_vlan_removal_en_flg);

		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   vport_id, mtu);
	}

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev,
			  u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return 0;
}

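/* Update an existing vport. On 100g (CMT) devices the upper layer's RSS
 * indirection table is engine-agnostic, so its entries are rescaled per
 * engine first, or RSS is disabled altogether when each hwfn owns only a
 * single queue.
 */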
static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;

	/* RSS is a bit tricky, since the upper layer isn't familiar with
	 * hwfns; the RSS values need to be re-fixed per engine for CMT.
	 */
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss =
			&params->rss_params;
		int k, max = 0;

		/* Find the largest entry, since it's possible RSS needs to
		 * be disabled [in case there is only 1 queue per-hwfn].
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
			      max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
				      cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
					rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now prepare the actual RSS configuration */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = QED_RSS_IPV4 |
					 QED_RSS_IPV6 |
					 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
	}
	sp_params.rss_params = &sp_rss_params;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}

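/* Start an Rx queue. The queue is steered to a hw-function by rss_id;
 * in CMT mode the global queue index is also divided down to the
 * per-hwfn index.
 */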
static int qed_start_rxq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 void __iomem **pp_prod)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_rx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       params,
				       bd_max_bytes,
				       bd_chain_phys_addr,
				       cqe_pbl_addr,
				       cqe_pbl_size,
				       pp_prod);

	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   params->queue_id, params->rss_id, params->vport_id,
		   params->sb);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev,
			struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
				      params->rx_queue_id / cdev->num_hwfns,
				      params->eq_completion_only,
				      false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 void __iomem **pp_doorbell)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = p_params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	p_params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_tx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       p_params,
				       pbl_addr,
				       pbl_size,
				       pp_doorbell);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev,
			struct qed_stop_txq_params *params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
				      params->tx_queue_id / cdev->num_hwfns);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast,
				    QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n",
			  (int)params->type);
		return -EINVAL;
	}
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id,
				 struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
};

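/* Entry point for L2 protocol drivers. A caller such as qede would
 * typically do something along these lines (an illustrative sketch, not
 * code taken from this file):
 *
 *	const struct qed_eth_ops *ops;
 *
 *	ops = qed_get_eth_ops(QED_ETH_INTERFACE_VERSION);
 *	if (!ops)
 *		return -EINVAL;
 *	...
 *	ops->fill_dev_info(cdev, &dev_info);
 *	...
 *	qed_put_eth_ops();
 */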
const struct qed_eth_ops *qed_get_eth_ops(u32 version)
{
	if (version != QED_ETH_INTERFACE_VERSION) {
		pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
			  version, QED_ETH_INTERFACE_VERSION);
		return NULL;
	}

	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);