/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/qed/qed_chain.h>
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
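/* Note: CRC32_POLY is the CRC-32C (Castagnoli) polynomial; qed_calc_crc32c()
 * below uses it to hash multicast MAC addresses into approximate-multicast
 * bins.
 */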
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	u16 rx_mode = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
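/* Vport start is split by function type: a VF forwards the request to its
 * PF over the VF->PF channel, while a PF posts the VPORT_START ramrod
 * itself via qed_sp_eth_vport_start() above.
 */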
int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}
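/* Fills the RSS section of a vport-update ramrod, translating the
 * driver-relative RSS engine and L2 queue indices into the absolute IDs
 * the firmware expects.
 */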
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		p_ramrod->common.update_tpa_param_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (p_params->update_approx_mcast_flg) {
		p_ramrod->common.update_approx_mcast_flg = 1;
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)p_params->bins;
			__le32 val = cpu_to_le32(p_bins[i]);

			p_ramrod->approx_mcast.bins[i] = val;
		}
	}
}
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	p_cmn->update_accept_any_vlan_flg =
		p_params->update_accept_any_vlan_flg;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}
static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
static int qed_sp_release_queue_cid(
	struct qed_hwfn *p_hwfn,
	struct qed_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return 0;

	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

	p_cid_data->b_cid_allocated = false;

	return 0;
}
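/* Posts the RX_QUEUE_START ramrod: the queue is described to the firmware
 * by its BD-chain base address and the PBL of its completion queue.
 */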
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *p_params,
				u8 stats_id,
				u16 bd_max_bytes,
				dma_addr_t bd_chain_phys_addr,
				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = p_params->vport_id;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
	if (rc)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid,
		   cid, p_params->queue_id, p_params->vport_id, p_params->sb);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
	p_ramrod->sb_index = p_params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	p_ramrod->vf_rx_prod_index = p_params->vf_qid;
	if (p_params->vf_qid)
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue is meant for VF rxq[%04x]\n",
			   p_params->vf_qid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u32 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_rxq_start(p_hwfn,
					   p_params->queue_id,
					   p_params->sb,
					   (u8)p_params->sb_idx,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
	}

	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	*pp_prod = (u8 __iomem *)p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 p_params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr, cqe_pbl_size);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				u16 rx_queue_id,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 qid, abs_rx_q_id = 0;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		qid = rx_queue_id + i;
		p_rx_cid = &p_hwfn->p_rx_cids[qid];

		/* Get SPQ entry */
		init_data.cid = p_rx_cid->cid;
		init_data.opaque_fid = p_rx_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;

		qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
		qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
		p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			     u16 rx_queue_id,
			     bool eq_completion_only, bool cqe_completion)
{
	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg =
		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
		 !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg =
		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
		eq_completion_only;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *p_params,
				u8 stats_id,
				dma_addr_t pbl_addr,
				u16 pbl_size,
				union qed_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_tx_cid;
	u16 pq_id, abs_tx_q_id = 0;
	u8 abs_vport_id;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
	p_ramrod->sb_index = p_params->sb_idx;
	p_ramrod->stats_counter_id = stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_hw_cid_data *p_tx_cid;
	union qed_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_txq_start(p_hwfn,
					   p_params->queue_id,
					   p_params->sb,
					   p_params->sb_idx,
					   pbl_addr, pbl_size, pp_doorbell);
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
	memset(&pq_params, 0, sizeof(pq_params));

	/* Allocate a CID for the queue */
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid,
		   p_params->queue_id, p_params->vport_id, p_params->sb);

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_tx_cid->cid,
					 p_params,
					 abs_stats_id,
					 pbl_addr,
					 pbl_size,
					 &pq_params);

	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
		       qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_tx_cid->cid;
	init_data.opaque_fid = p_tx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}
static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}
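/* The firmware stores a MAC address as three 16-bit little-endian words,
 * so each byte pair of the canonical u8[6] address is swapped on copy.
 */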
static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}
*p_hwfn
,
1073 struct qed_filter_ucast
*p_filter_cmd
,
1074 enum spq_mode comp_mode
,
1075 struct qed_spq_comp_cb
*p_comp_data
)
1077 struct vport_filter_update_ramrod_data
*p_ramrod
= NULL
;
1078 struct qed_spq_entry
*p_ent
= NULL
;
1079 struct eth_filter_cmd_header
*p_header
;
1082 rc
= qed_filter_ucast_common(p_hwfn
, opaque_fid
, p_filter_cmd
,
1084 comp_mode
, p_comp_data
);
1086 DP_ERR(p_hwfn
, "Uni. filter command failed %d\n", rc
);
1089 p_header
= &p_ramrod
->filter_cmd_hdr
;
1090 p_header
->assert_on_error
= p_filter_cmd
->assert_on_error
;
1092 rc
= qed_spq_post(p_hwfn
, p_ent
, NULL
);
1094 DP_ERR(p_hwfn
, "Unicast filter ADD command failed %d\n", rc
);
1098 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
1099 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1100 (p_filter_cmd
->opcode
== QED_FILTER_ADD
) ? "ADD" :
1101 ((p_filter_cmd
->opcode
== QED_FILTER_REMOVE
) ?
1103 ((p_filter_cmd
->opcode
== QED_FILTER_MOVE
) ?
1104 "MOVE" : "REPLACE")),
1105 (p_filter_cmd
->type
== QED_FILTER_MAC
) ? "MAC" :
1106 ((p_filter_cmd
->type
== QED_FILTER_VLAN
) ?
1107 "VLAN" : "MAC & VLAN"),
1108 p_ramrod
->filter_cmd_hdr
.cmd_cnt
,
1109 p_filter_cmd
->is_rx_filter
,
1110 p_filter_cmd
->is_tx_filter
);
1111 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
1112 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1113 p_filter_cmd
->vport_to_add_to
,
1114 p_filter_cmd
->vport_to_remove_from
,
1115 p_filter_cmd
->mac
[0],
1116 p_filter_cmd
->mac
[1],
1117 p_filter_cmd
->mac
[2],
1118 p_filter_cmd
->mac
[3],
1119 p_filter_cmd
->mac
[4],
1120 p_filter_cmd
->mac
[5],
1121 p_filter_cmd
->vlan
);
/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}
static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
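/* Maps a multicast MAC address to its approximate-multicast bin by hashing
 * the address with CRC-32C.
 */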
u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
		if (rc)
			return rc;
	} else {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
		if (rc)
			return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianity */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}

	return rc;
}
static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}

	return rc;
}
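/* Per-storm statistics live in storm RAM: a PF computes the address/length
 * from the storm offsets directly, while a VF uses the locations the PF
 * provided in the acquire response.
 */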
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.eth.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
	p_stats->rx_crc_errors += port_stats.eth.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_stats->rx_pause_frames += port_stats.eth.rxpf;
	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
	p_stats->rx_align_errors += port_stats.eth.raln;
	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
	p_stats->rx_oversize_packets += port_stats.eth.rovr;
	p_stats->rx_jabbers += port_stats.eth.rjbr;
	p_stats->rx_undersize_packets += port_stats.eth.rund;
	p_stats->rx_fragments += port_stats.eth.rfrg;
	p_stats->tx_64_byte_packets += port_stats.eth.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
	p_stats->tx_pause_frames += port_stats.eth.txpf;
	p_stats->tx_pfc_frames += port_stats.eth.txpp;
	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
	p_stats->tx_total_collisions += port_stats.eth.tncl;
	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			for_each_hwfn(cdev, i)
				info->num_queues +=
				    FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
			if (cdev->int_params.fp_msix_cnt)
				info->num_queues =
				    min_t(u8, info->num_queues,
					  cdev->int_params.fp_msix_cnt);
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev))
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
		info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
					 max_vf_vlan_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
	} else {
		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
		if (cdev->num_hwfns > 1) {
			u8 queues = 0;

			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    &info->num_vlan_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		memset(info->common.hw_mac, 0, ETH_ALEN);

	return 0;
}
static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}
static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}
static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}
*cdev
,
1772 struct qed_update_vport_params
*params
)
1774 struct qed_sp_vport_update_params sp_params
;
1775 struct qed_rss_params sp_rss_params
;
1781 memset(&sp_params
, 0, sizeof(sp_params
));
1782 memset(&sp_rss_params
, 0, sizeof(sp_rss_params
));
1784 /* Translate protocol params into sp params */
1785 sp_params
.vport_id
= params
->vport_id
;
1786 sp_params
.update_vport_active_rx_flg
= params
->update_vport_active_flg
;
1787 sp_params
.update_vport_active_tx_flg
= params
->update_vport_active_flg
;
1788 sp_params
.vport_active_rx_flg
= params
->vport_active_flg
;
1789 sp_params
.vport_active_tx_flg
= params
->vport_active_flg
;
1790 sp_params
.update_tx_switching_flg
= params
->update_tx_switching_flg
;
1791 sp_params
.tx_switching_flg
= params
->tx_switching_flg
;
1792 sp_params
.accept_any_vlan
= params
->accept_any_vlan
;
1793 sp_params
.update_accept_any_vlan_flg
=
1794 params
->update_accept_any_vlan_flg
;
1796 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
1797 * We need to re-fix the rss values per engine for CMT.
1799 if (cdev
->num_hwfns
> 1 && params
->update_rss_flg
) {
1800 struct qed_update_vport_rss_params
*rss
= ¶ms
->rss_params
;
1803 /* Find largest entry, since it's possible RSS needs to
1804 * be disabled [in case only 1 queue per-hwfn]
1806 for (k
= 0; k
< QED_RSS_IND_TABLE_SIZE
; k
++)
1807 max
= (max
> rss
->rss_ind_table
[k
]) ?
1808 max
: rss
->rss_ind_table
[k
];
1810 /* Either fix RSS values or disable RSS */
1811 if (cdev
->num_hwfns
< max
+ 1) {
1812 int divisor
= (max
+ cdev
->num_hwfns
- 1) /
1815 DP_VERBOSE(cdev
, (QED_MSG_SPQ
| NETIF_MSG_IFUP
),
1816 "CMT - fixing RSS values (modulo %02x)\n",
1819 for (k
= 0; k
< QED_RSS_IND_TABLE_SIZE
; k
++)
1820 rss
->rss_ind_table
[k
] =
1821 rss
->rss_ind_table
[k
] % divisor
;
1823 DP_VERBOSE(cdev
, (QED_MSG_SPQ
| NETIF_MSG_IFUP
),
1824 "CMT - 1 queue per-hwfn; Disabling RSS\n");
1825 params
->update_rss_flg
= 0;
1829 /* Now, update the RSS configuration for actual configuration */
1830 if (params
->update_rss_flg
) {
1831 sp_rss_params
.update_rss_config
= 1;
1832 sp_rss_params
.rss_enable
= 1;
1833 sp_rss_params
.update_rss_capabilities
= 1;
1834 sp_rss_params
.update_rss_ind_table
= 1;
1835 sp_rss_params
.update_rss_key
= 1;
1836 sp_rss_params
.rss_caps
= params
->rss_params
.rss_caps
;
1837 sp_rss_params
.rss_table_size_log
= 7; /* 2^7 = 128 */
1838 memcpy(sp_rss_params
.rss_ind_table
,
1839 params
->rss_params
.rss_ind_table
,
1840 QED_RSS_IND_TABLE_SIZE
* sizeof(u16
));
1841 memcpy(sp_rss_params
.rss_key
, params
->rss_params
.rss_key
,
1842 QED_RSS_KEY_SIZE
* sizeof(u32
));
1844 sp_params
.rss_params
= &sp_rss_params
;
1846 for_each_hwfn(cdev
, i
) {
1847 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[i
];
1849 sp_params
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1850 rc
= qed_sp_vport_update(p_hwfn
, &sp_params
,
1851 QED_SPQ_MODE_EBLOCK
,
1854 DP_ERR(cdev
, "Failed to update VPORT\n");
1858 DP_VERBOSE(cdev
, (QED_MSG_SPQ
| NETIF_MSG_IFUP
),
1859 "Updated V-PORT %d: active_flag %d [update %d]\n",
1860 params
->vport_id
, params
->vport_active_flg
,
1861 params
->update_vport_active_flg
);
static int qed_start_rxq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 void __iomem **pp_prod)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_rx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       params,
				       bd_max_bytes,
				       bd_chain_phys_addr,
				       cqe_pbl_addr,
				       cqe_pbl_size,
				       pp_prod);

	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   params->queue_id, params->rss_id, params->vport_id,
		   params->sb);

	return 0;
}
static int qed_stop_rxq(struct qed_dev *cdev,
			struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
				      params->rx_queue_id / cdev->num_hwfns,
				      params->eq_completion_only, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}
static int qed_start_txq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 void __iomem **pp_doorbell)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = p_params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	p_params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_tx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       p_params,
				       pbl_addr,
				       pbl_size,
				       pp_doorbell);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
		   p_params->sb);

	return 0;
}
#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}
static int qed_stop_txq(struct qed_dev *cdev,
			struct qed_stop_txq_params *params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
				      params->tx_queue_id / cdev->num_hwfns);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}
static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunn_update_params tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port == 1) {
		tunn_info.update_vxlan_udp_port = 1;
		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port == 1) {
		tunn_info.update_geneve_udp_port = 1;
		tunn_info.geneve_udp_port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);

		if (rc)
			return rc;
	}

	return 0;
}
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);