/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "qede_ethdev.h"
#include <rte_string_fns.h>
#include <rte_version.h>
#include <rte_kvargs.h>

int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
#define QEDE_SP_TIMER_PERIOD	10000 /* 10ms, in microseconds */

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},
90 {"rx_mac_crtl_frames",
91 offsetof(struct ecore_eth_stats_common
, rx_mac_crtl_frames
)},
92 {"tx_mac_control_frames",
93 offsetof(struct ecore_eth_stats_common
, tx_mac_ctrl_frames
)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabbers",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};
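
/* How these name/offset tables are consumed (the real loops are in
 * qede_get_xstats() and qede_reset_queue_stats() below): each entry maps
 * an xstat name to a byte offset into a stats structure, so a counter can
 * be read generically with pointer arithmetic. A minimal sketch, assuming
 * the referenced member is a 64-bit counter (all table entries are):
 *
 *	const struct rte_qede_xstats_name_off *e = &qede_xstats_strings[i];
 *	uint64_t val = *(uint64_t *)(((char *)&stats) + e->offset);
 */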

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler_intx(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	u64 status;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
	if (status & 0x1) {
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));

		if (rte_intr_enable(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_enable failed\n");
	}
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
	DP_INFO(edev, "*********************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for_each_rss(qid) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0, sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
			    (((uint64_t)(uintptr_t)(txq)) +
			     offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

#define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
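	/* Note on the bitwise test below: enable_tx_switching holds 0 or 1,
	 * so ~enable_tx_switching has its low bit set only when tx-switching
	 * is disabled. Worked through both cases with flg == 1 (activation):
	 *	enable_tx_switching == 1: ~1 & 1 == 0 -> keep firmware default
	 *	enable_tx_switching == 0: ~0 & 1 == 1 -> request
	 *		tx_switching_flg = !flg = 0 (explicitly off)
	 */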
	if (~qdev->enable_tx_switching & flg) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	eth_dev->data->lro = flg;

	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

static enum _ecore_status_t
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
			ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vni == tmp->vni &&
			    ucast->vlan == tmp->vlan) {
				DP_INFO(edev, "Unicast MAC is already added"
					" with vlan = %u, vni = %u\n",
					ucast->vlan, ucast->vni);
				return 0;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		       uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *m = NULL;
	uint8_t i;
	int rc;

	for (i = 0; i < mc_addrs_num; i++) {
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev, "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(&mc_addrs[i], &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
						&mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j = 0;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Drain the shadow list; entries were allocated with rte_malloc(),
	 * so free them as they are unlinked to avoid leaking them.
	 */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
		rte_free(tmp);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		return -EPERM;
	}

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return rc;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit,"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",
					vlan_id);
				return 0;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (tmp == NULL) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       "Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_ERR(edev, "Extend VLAN not supported\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}
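
/* The generated key above is only pseudo-random (rand() seeded from
 * time()); it spreads flows fine but offers no hash secrecy. An application
 * that needs a specific or stronger key supplies one at configure time; a
 * sketch, assuming a 40-byte key (ECORE_RSS_KEY_SIZE uint32 words):
 *
 *	static uint8_t rss_key[40] = {0}; // fill with application bytes
 *	dev_conf.rx_adv_conf.rss_conf.rss_key = rss_key;
 *	dev_conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(rss_key);
 *
 * in which case qede_config_rss() below skips the driver default key.
 */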

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}
*edev
)
1004 struct ecore_hwfn
*p_hwfn
;
1007 for_each_hwfn(edev
, i
) {
1008 p_hwfn
= &edev
->hwfns
[i
];
1009 ecore_hw_start_fastpath(p_hwfn
);

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (eth_dev->data->mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->mtu))
			goto err;
	}

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

static const char * const valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "%s tx-switching %s\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ? "VF" : "NPAR",
			qdev->enable_tx_switching ? "enabled" : "disabled");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}
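
/* Usage sketch for the devargs handled above (exact EAL whitelist syntax
 * depends on the DPDK version): attaching the port with, e.g.,
 *
 *	-w 0000:05:00.0,npar_tx_switching=0
 *
 * makes qede_args_check() clear qdev->enable_tx_switching, while
 * vf_tx_switching=0 does the same on a VF; any non-zero value enables it.
 */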

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
	if (qede_alloc_fp_resc(qdev))
		return -ENOMEM;

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
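
	/* Worked example of the adjustment above (values illustrative): with
	 * max_rx_pkt_len = 1518, the resulting MTU is 1518 - ETHER_HDR_LEN
	 * (14) - QEDE_ETH_OVERHEAD, i.e. the L2 header and the driver's
	 * per-frame overhead (see the macro definition) are subtracted so
	 * the payload MTU matches the configured frame size.
	 */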

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
					     ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO |
				     DEV_RX_OFFLOAD_KEEP_CRC |
				     DEV_RX_OFFLOAD_SCATTER |
				     DEV_RX_OFFLOAD_JUMBO_FRAME |
				     DEV_RX_OFFLOAD_VLAN_FILTER |
				     DEV_RX_OFFLOAD_VLAN_STRIP);
	dev_info->rx_queue_offload_capa = 0;

	/* TX offloads are on a per-packet basis, so it is applicable
	 * to both at port and queue levels.
	 */
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_MULTI_SEGS |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		/* Packets are always dropped if no descriptors are available */
		.rx_drop_en = 1,
		.offloads = 0,
	};

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output q_link;
	struct rte_eth_link link;
	uint16_t link_duplex;

	memset(&q_link, 0, sizeof(q_link));
	memset(&link, 0, sizeof(link));

	qdev->ops->common->get_link(edev, &q_link);

	/* Link Speed */
	link.link_speed = q_link.speed;

	/* Link Mode */
	switch (q_link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	link.link_duplex = link_duplex;

	/* Link Status */
	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			    ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		link.link_speed, link.link_duplex,
		link.link_autoneg, link.link_status);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources, and the device can be brought up
	 * afresh.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);
	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler_intx,
					     (void *)eth_dev);
		break;
	default:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler,
					     (void *)eth_dev);
	}

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");

	for_each_rss(qid) {
		eth_stats->q_ipackets[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rcv_pkts));
		eth_stats->q_errors[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_hw_errors)) +
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_alloc_errors));
		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue,
					  xmit_pkts)));
		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned int
qede_get_xstats_count(struct qede_dev *qdev) {
	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
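
/* Example count: on a BB adapter with at least 2 RX queues counted
 * (RTE_ETHDEV_QUEUE_STAT_CNTRS >= 2), the total is
 * RTE_DIM(qede_xstats_strings) + RTE_DIM(qede_bb_xstats_strings) + 3 * 2,
 * since qede_rxq_xstats_strings carries 3 per-queue counters and each
 * counted RX queue contributes one copy of them.
 */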

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));
			stat_idx++;
		}

		if (ECORE_IS_BB(edev)) {
			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
				strlcpy(xstats_names[stat_idx].name,
					qede_bb_xstats_strings[i].name,
					sizeof(xstats_names[stat_idx].name));
				stat_idx++;
			}
		} else {
			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
				strlcpy(xstats_names[stat_idx].name,
					qede_ah_xstats_strings[i].name,
					sizeof(xstats_names[stat_idx].name));
				stat_idx++;
			}
		}

		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%.4s%d%s",
					 qede_rxq_xstats_strings[i].name, qid,
					 qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}
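
/* Name synthesis in the loop above, worked through for qid 0: the format
 * "%.4s%d%s" takes the first four characters of the template, then the
 * queue id, then the tail at name + 4, so "rx_q_segments" becomes
 * "rx_q" + "0" + "_segments" = "rx_q0_segments".
 */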

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					     qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
			xstats[stat_idx].value = *(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				qede_rxq_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0,
		       sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
						   ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev,
					"VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_GRE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
}
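
/* Mapping example: a request for (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP)
 * collapses to (ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP). Note the *_EX
 * variants fold into the same ecore bits as their base IPv6 types, so the
 * hardware does not distinguish them.
 */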

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx;
	uint8_t i;
	int rc = 0;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	/* pass the L2 handles instead of qids */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		idx = i % QEDE_RSS_COUNT(qdev);
		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
	}
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);

	return 0;
}
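
/*
 * Note: the query above is served entirely from the copy cached in
 * qdev->rss_conf by qede_rss_hash_update(); nothing is read back from
 * the hardware.
 */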

static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];

		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;

	return rss_mode;
}

int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
					qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
							    params);
	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
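
/*
 * Usage sketch (illustrative only): filling and programming a 128-entry
 * redirection table from an application; port_id and nb_rxq are assumed.
 *
 *	struct rte_eth_rss_reta_entry64 reta[128 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < 128; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 *	rte_eth_dev_rss_reta_query(port_id, reta, 128);
 */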

static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for_each_rss(i) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	/* Reassign back */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}
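
/*
 * Usage sketch (illustrative only): an MTU change from the application;
 * port_id is an assumption. The callback above temporarily installs dummy
 * burst functions so a concurrent rte_eth_rx_burst() sees an idle port
 * instead of a NULL function pointer.
 *
 *	int ret = rte_eth_dev_set_mtu(port_id, 9000);
 *	if (ret != 0)
 *		printf("MTU change failed: %d\n", ret);
 */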

static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}
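
/*
 * Note: rte_eth_dev_reset() lands here; reset is implemented as a full
 * uninit/init cycle, so the application must reconfigure queues, RSS and
 * filters afterwards.
 */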

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
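
/*
 * The VF ops table below is a strict subset of the PF table above: it
 * drops the flow-control and filter_ctrl callbacks, which are PF-only.
 */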
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
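
/*
 * Note (assumption based on call order): qede_update_pf_params() runs
 * after probe but before slowpath_start, so that ecore can size its
 * connection and aRFS filter resources from these parameters.
 */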

static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					       (ETHER_ADDR_LEN *
					       adapter->dev_info.num_mac_filters),
					       RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads is enabled by default in PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct qede_dev),
					     qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct qede_dev),
					     qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}
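
/*
 * Note: both log types default to NOTICE. They can be raised at startup
 * through the EAL --log-level option (e.g. matching "pmd.net.qede.*");
 * the exact level syntax depends on the DPDK version in use.
 */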