/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
/* Callbacks into the base (ecore) L2 driver; presumably assigned during
 * device probe — TODO confirm (the assignment is not in this chunk).
 */
14 static const struct qed_eth_ops
*qed_ops
;
/* Driver name string exposed by this PMD. */
15 static const char *drivername
= "qede pmd";
/* Slow-path status-block polling period in seconds (multiplied by
 * US_PER_S when armed via rte_eal_alarm_set() later in this file).
 */
16 static int64_t timer_period
= 1;
/* Maps an extended-statistic display name to the byte offset of the
 * backing counter inside a stats structure (used with offsetof below).
 * NOTE(review): the embedded numbering jumps from 19 to 23 here — the
 * offset member (likely "uint64_t offset;") and the closing brace were
 * lost in this copy of the file; recover them from the upstream driver.
 */
18 struct rte_qede_xstats_name_off
{
19 char name
[RTE_ETH_XSTATS_NAME_SIZE
];
23 static const struct rte_qede_xstats_name_off qede_xstats_strings
[] = {
24 {"rx_unicast_bytes", offsetof(struct ecore_eth_stats
, rx_ucast_bytes
)},
25 {"rx_multicast_bytes",
26 offsetof(struct ecore_eth_stats
, rx_mcast_bytes
)},
27 {"rx_broadcast_bytes",
28 offsetof(struct ecore_eth_stats
, rx_bcast_bytes
)},
29 {"rx_unicast_packets", offsetof(struct ecore_eth_stats
, rx_ucast_pkts
)},
30 {"rx_multicast_packets",
31 offsetof(struct ecore_eth_stats
, rx_mcast_pkts
)},
32 {"rx_broadcast_packets",
33 offsetof(struct ecore_eth_stats
, rx_bcast_pkts
)},
35 {"tx_unicast_bytes", offsetof(struct ecore_eth_stats
, tx_ucast_bytes
)},
36 {"tx_multicast_bytes",
37 offsetof(struct ecore_eth_stats
, tx_mcast_bytes
)},
38 {"tx_broadcast_bytes",
39 offsetof(struct ecore_eth_stats
, tx_bcast_bytes
)},
40 {"tx_unicast_packets", offsetof(struct ecore_eth_stats
, tx_ucast_pkts
)},
41 {"tx_multicast_packets",
42 offsetof(struct ecore_eth_stats
, tx_mcast_pkts
)},
43 {"tx_broadcast_packets",
44 offsetof(struct ecore_eth_stats
, tx_bcast_pkts
)},
46 {"rx_64_byte_packets",
47 offsetof(struct ecore_eth_stats
, rx_64_byte_packets
)},
48 {"rx_65_to_127_byte_packets",
49 offsetof(struct ecore_eth_stats
, rx_65_to_127_byte_packets
)},
50 {"rx_128_to_255_byte_packets",
51 offsetof(struct ecore_eth_stats
, rx_128_to_255_byte_packets
)},
52 {"rx_256_to_511_byte_packets",
53 offsetof(struct ecore_eth_stats
, rx_256_to_511_byte_packets
)},
54 {"rx_512_to_1023_byte_packets",
55 offsetof(struct ecore_eth_stats
, rx_512_to_1023_byte_packets
)},
56 {"rx_1024_to_1518_byte_packets",
57 offsetof(struct ecore_eth_stats
, rx_1024_to_1518_byte_packets
)},
58 {"rx_1519_to_1522_byte_packets",
59 offsetof(struct ecore_eth_stats
, rx_1519_to_1522_byte_packets
)},
60 {"rx_1519_to_2047_byte_packets",
61 offsetof(struct ecore_eth_stats
, rx_1519_to_2047_byte_packets
)},
62 {"rx_2048_to_4095_byte_packets",
63 offsetof(struct ecore_eth_stats
, rx_2048_to_4095_byte_packets
)},
64 {"rx_4096_to_9216_byte_packets",
65 offsetof(struct ecore_eth_stats
, rx_4096_to_9216_byte_packets
)},
66 {"rx_9217_to_16383_byte_packets",
67 offsetof(struct ecore_eth_stats
,
68 rx_9217_to_16383_byte_packets
)},
69 {"tx_64_byte_packets",
70 offsetof(struct ecore_eth_stats
, tx_64_byte_packets
)},
71 {"tx_65_to_127_byte_packets",
72 offsetof(struct ecore_eth_stats
, tx_65_to_127_byte_packets
)},
73 {"tx_128_to_255_byte_packets",
74 offsetof(struct ecore_eth_stats
, tx_128_to_255_byte_packets
)},
75 {"tx_256_to_511_byte_packets",
76 offsetof(struct ecore_eth_stats
, tx_256_to_511_byte_packets
)},
77 {"tx_512_to_1023_byte_packets",
78 offsetof(struct ecore_eth_stats
, tx_512_to_1023_byte_packets
)},
79 {"tx_1024_to_1518_byte_packets",
80 offsetof(struct ecore_eth_stats
, tx_1024_to_1518_byte_packets
)},
81 {"trx_1519_to_1522_byte_packets",
82 offsetof(struct ecore_eth_stats
, tx_1519_to_2047_byte_packets
)},
83 {"tx_2048_to_4095_byte_packets",
84 offsetof(struct ecore_eth_stats
, tx_2048_to_4095_byte_packets
)},
85 {"tx_4096_to_9216_byte_packets",
86 offsetof(struct ecore_eth_stats
, tx_4096_to_9216_byte_packets
)},
87 {"tx_9217_to_16383_byte_packets",
88 offsetof(struct ecore_eth_stats
,
89 tx_9217_to_16383_byte_packets
)},
91 {"rx_mac_crtl_frames",
92 offsetof(struct ecore_eth_stats
, rx_mac_crtl_frames
)},
93 {"tx_mac_control_frames",
94 offsetof(struct ecore_eth_stats
, tx_mac_ctrl_frames
)},
95 {"rx_pause_frames", offsetof(struct ecore_eth_stats
, rx_pause_frames
)},
96 {"tx_pause_frames", offsetof(struct ecore_eth_stats
, tx_pause_frames
)},
97 {"rx_priority_flow_control_frames",
98 offsetof(struct ecore_eth_stats
, rx_pfc_frames
)},
99 {"tx_priority_flow_control_frames",
100 offsetof(struct ecore_eth_stats
, tx_pfc_frames
)},
102 {"rx_crc_errors", offsetof(struct ecore_eth_stats
, rx_crc_errors
)},
103 {"rx_align_errors", offsetof(struct ecore_eth_stats
, rx_align_errors
)},
104 {"rx_carrier_errors",
105 offsetof(struct ecore_eth_stats
, rx_carrier_errors
)},
106 {"rx_oversize_packet_errors",
107 offsetof(struct ecore_eth_stats
, rx_oversize_packets
)},
108 {"rx_jabber_errors", offsetof(struct ecore_eth_stats
, rx_jabbers
)},
109 {"rx_undersize_packet_errors",
110 offsetof(struct ecore_eth_stats
, rx_undersize_packets
)},
111 {"rx_fragments", offsetof(struct ecore_eth_stats
, rx_fragments
)},
112 {"rx_host_buffer_not_available",
113 offsetof(struct ecore_eth_stats
, no_buff_discards
)},
114 /* Number of packets discarded because they are bigger than MTU */
115 {"rx_packet_too_big_discards",
116 offsetof(struct ecore_eth_stats
, packet_too_big_discard
)},
117 {"rx_ttl_zero_discards",
118 offsetof(struct ecore_eth_stats
, ttl0_discard
)},
119 {"rx_multi_function_tag_filter_discards",
120 offsetof(struct ecore_eth_stats
, mftag_filter_discards
)},
121 {"rx_mac_filter_discards",
122 offsetof(struct ecore_eth_stats
, mac_filter_discards
)},
123 {"rx_hw_buffer_truncates",
124 offsetof(struct ecore_eth_stats
, brb_truncates
)},
125 {"rx_hw_buffer_discards",
126 offsetof(struct ecore_eth_stats
, brb_discards
)},
127 {"tx_lpi_entry_count",
128 offsetof(struct ecore_eth_stats
, tx_lpi_entry_count
)},
129 {"tx_total_collisions",
130 offsetof(struct ecore_eth_stats
, tx_total_collisions
)},
131 {"tx_error_drop_packets",
132 offsetof(struct ecore_eth_stats
, tx_err_drop_pkts
)},
134 {"rx_mac_bytes", offsetof(struct ecore_eth_stats
, rx_mac_bytes
)},
135 {"rx_mac_unicast_packets",
136 offsetof(struct ecore_eth_stats
, rx_mac_uc_packets
)},
137 {"rx_mac_multicast_packets",
138 offsetof(struct ecore_eth_stats
, rx_mac_mc_packets
)},
139 {"rx_mac_broadcast_packets",
140 offsetof(struct ecore_eth_stats
, rx_mac_bc_packets
)},
142 offsetof(struct ecore_eth_stats
, rx_mac_frames_ok
)},
143 {"tx_mac_bytes", offsetof(struct ecore_eth_stats
, tx_mac_bytes
)},
144 {"tx_mac_unicast_packets",
145 offsetof(struct ecore_eth_stats
, tx_mac_uc_packets
)},
146 {"tx_mac_multicast_packets",
147 offsetof(struct ecore_eth_stats
, tx_mac_mc_packets
)},
148 {"tx_mac_broadcast_packets",
149 offsetof(struct ecore_eth_stats
, tx_mac_bc_packets
)},
151 {"lro_coalesced_packets",
152 offsetof(struct ecore_eth_stats
, tpa_coalesced_pkts
)},
153 {"lro_coalesced_events",
154 offsetof(struct ecore_eth_stats
, tpa_coalesced_events
)},
156 offsetof(struct ecore_eth_stats
, tpa_aborts_num
)},
157 {"lro_not_coalesced_packets",
158 offsetof(struct ecore_eth_stats
, tpa_not_coalesced_pkts
)},
159 {"lro_coalesced_bytes",
160 offsetof(struct ecore_eth_stats
, tpa_coalesced_bytes
)},
163 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings
[] = {
165 offsetof(struct qede_rx_queue
, rx_segs
)},
167 offsetof(struct qede_rx_queue
, rx_hw_errors
)},
168 {"rx_q_allocation_errors",
169 offsetof(struct qede_rx_queue
, rx_alloc_errors
)}
172 static void qede_interrupt_action(struct ecore_hwfn
*p_hwfn
)
174 ecore_int_sp_dpc((osal_int_ptr_t
)(p_hwfn
));
178 qede_interrupt_handler(__rte_unused
struct rte_intr_handle
*handle
, void *param
)
180 struct rte_eth_dev
*eth_dev
= (struct rte_eth_dev
*)param
;
181 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
182 struct ecore_dev
*edev
= &qdev
->edev
;
184 qede_interrupt_action(ECORE_LEADING_HWFN(edev
));
185 if (rte_intr_enable(ð_dev
->pci_dev
->intr_handle
))
186 DP_ERR(edev
, "rte_intr_enable failed\n");
190 qede_alloc_etherdev(struct qede_dev
*qdev
, struct qed_dev_eth_info
*info
)
192 rte_memcpy(&qdev
->dev_info
, info
, sizeof(*info
));
193 qdev
->num_tc
= qdev
->dev_info
.num_tc
;
197 static void qede_print_adapter_info(struct qede_dev
*qdev
)
199 struct ecore_dev
*edev
= &qdev
->edev
;
200 struct qed_dev_info
*info
= &qdev
->dev_info
.common
;
201 static char drv_ver
[QEDE_PMD_DRV_VER_STR_SIZE
];
202 static char ver_str
[QEDE_PMD_DRV_VER_STR_SIZE
];
204 DP_INFO(edev
, "*********************************\n");
205 DP_INFO(edev
, " DPDK version:%s\n", rte_version());
206 DP_INFO(edev
, " Chip details : %s%d\n",
207 ECORE_IS_BB(edev
) ? "BB" : "AH",
208 CHIP_REV_IS_A0(edev
) ? 0 : 1);
209 snprintf(ver_str
, QEDE_PMD_DRV_VER_STR_SIZE
, "%d.%d.%d.%d",
210 info
->fw_major
, info
->fw_minor
, info
->fw_rev
, info
->fw_eng
);
211 snprintf(drv_ver
, QEDE_PMD_DRV_VER_STR_SIZE
, "%s_%s",
212 ver_str
, QEDE_PMD_VERSION
);
213 DP_INFO(edev
, " Driver version : %s\n", drv_ver
);
214 DP_INFO(edev
, " Firmware version : %s\n", ver_str
);
216 snprintf(ver_str
, MCP_DRV_VER_STR_SIZE
,
218 (info
->mfw_rev
>> 24) & 0xff,
219 (info
->mfw_rev
>> 16) & 0xff,
220 (info
->mfw_rev
>> 8) & 0xff, (info
->mfw_rev
) & 0xff);
221 DP_INFO(edev
, " Management Firmware version : %s\n", ver_str
);
222 DP_INFO(edev
, " Firmware file : %s\n", fw_file
);
223 DP_INFO(edev
, "*********************************\n");
227 qede_set_ucast_rx_mac(struct qede_dev
*qdev
,
228 enum qed_filter_xcast_params_type opcode
,
229 uint8_t mac
[ETHER_ADDR_LEN
])
231 struct ecore_dev
*edev
= &qdev
->edev
;
232 struct qed_filter_params filter_cmd
;
234 memset(&filter_cmd
, 0, sizeof(filter_cmd
));
235 filter_cmd
.type
= QED_FILTER_TYPE_UCAST
;
236 filter_cmd
.filter
.ucast
.type
= opcode
;
237 filter_cmd
.filter
.ucast
.mac_valid
= 1;
238 rte_memcpy(&filter_cmd
.filter
.ucast
.mac
[0], &mac
[0], ETHER_ADDR_LEN
);
239 return qdev
->ops
->filter_config(edev
, &filter_cmd
);
243 qede_mac_addr_add(struct rte_eth_dev
*eth_dev
, struct ether_addr
*mac_addr
,
244 uint32_t index
, __rte_unused
uint32_t pool
)
246 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
247 struct ecore_dev
*edev
= &qdev
->edev
;
250 PMD_INIT_FUNC_TRACE(edev
);
252 if (index
>= qdev
->dev_info
.num_mac_addrs
) {
253 DP_ERR(edev
, "Index %u is above MAC filter limit %u\n",
254 index
, qdev
->dev_info
.num_mac_addrs
);
258 /* Adding macaddr even though promiscuous mode is set */
259 if (rte_eth_promiscuous_get(eth_dev
->data
->port_id
) == 1)
260 DP_INFO(edev
, "Port is in promisc mode, yet adding it\n");
262 /* Add MAC filters according to the unicast secondary macs */
263 rc
= qede_set_ucast_rx_mac(qdev
, QED_FILTER_XCAST_TYPE_ADD
,
264 mac_addr
->addr_bytes
);
266 DP_ERR(edev
, "Unable to add macaddr rc=%d\n", rc
);
270 qede_mac_addr_remove(struct rte_eth_dev
*eth_dev
, uint32_t index
)
272 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
273 struct ecore_dev
*edev
= &qdev
->edev
;
274 struct ether_addr mac_addr
;
277 PMD_INIT_FUNC_TRACE(edev
);
279 if (index
>= qdev
->dev_info
.num_mac_addrs
) {
280 DP_ERR(edev
, "Index %u is above MAC filter limit %u\n",
281 index
, qdev
->dev_info
.num_mac_addrs
);
285 /* Use the index maintained by rte */
286 ether_addr_copy(ð_dev
->data
->mac_addrs
[index
], &mac_addr
);
287 rc
= qede_set_ucast_rx_mac(qdev
, QED_FILTER_XCAST_TYPE_DEL
,
288 mac_addr
.addr_bytes
);
290 DP_ERR(edev
, "Unable to remove macaddr rc=%d\n", rc
);
294 qede_mac_addr_set(struct rte_eth_dev
*eth_dev
, struct ether_addr
*mac_addr
)
296 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
297 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
300 if (IS_VF(edev
) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev
),
301 mac_addr
->addr_bytes
)) {
302 DP_ERR(edev
, "Setting MAC address is not allowed\n");
303 ether_addr_copy(&qdev
->primary_mac
,
304 ð_dev
->data
->mac_addrs
[0]);
308 /* First remove the primary mac */
309 rc
= qede_set_ucast_rx_mac(qdev
, QED_FILTER_XCAST_TYPE_DEL
,
310 qdev
->primary_mac
.addr_bytes
);
313 DP_ERR(edev
, "Unable to remove current macaddr"
314 " Reverting to previous default mac\n");
315 ether_addr_copy(&qdev
->primary_mac
,
316 ð_dev
->data
->mac_addrs
[0]);
321 rc
= qede_set_ucast_rx_mac(qdev
, QED_FILTER_XCAST_TYPE_ADD
,
322 mac_addr
->addr_bytes
);
325 DP_ERR(edev
, "Unable to add new default mac\n");
327 ether_addr_copy(mac_addr
, &qdev
->primary_mac
);
333 static void qede_config_accept_any_vlan(struct qede_dev
*qdev
, bool action
)
335 struct ecore_dev
*edev
= &qdev
->edev
;
336 struct qed_update_vport_params params
= {
338 .accept_any_vlan
= action
,
339 .update_accept_any_vlan_flg
= 1,
343 /* Proceed only if action actually needs to be performed */
344 if (qdev
->accept_any_vlan
== action
)
347 rc
= qdev
->ops
->vport_update(edev
, ¶ms
);
349 DP_ERR(edev
, "Failed to %s accept-any-vlan\n",
350 action
? "enable" : "disable");
352 DP_INFO(edev
, "%s accept-any-vlan\n",
353 action
? "enabled" : "disabled");
354 qdev
->accept_any_vlan
= action
;
358 static int qede_vlan_stripping(struct rte_eth_dev
*eth_dev
, bool set_stripping
)
360 struct qed_update_vport_params vport_update_params
;
361 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
362 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
365 memset(&vport_update_params
, 0, sizeof(vport_update_params
));
366 vport_update_params
.vport_id
= 0;
367 vport_update_params
.update_inner_vlan_removal_flg
= 1;
368 vport_update_params
.inner_vlan_removal_flg
= set_stripping
;
369 rc
= qdev
->ops
->vport_update(edev
, &vport_update_params
);
371 DP_ERR(edev
, "Update V-PORT failed %d\n", rc
);
378 static void qede_vlan_offload_set(struct rte_eth_dev
*eth_dev
, int mask
)
380 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
381 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
382 struct rte_eth_rxmode
*rxmode
= ð_dev
->data
->dev_conf
.rxmode
;
384 if (mask
& ETH_VLAN_STRIP_MASK
) {
385 if (rxmode
->hw_vlan_strip
)
386 (void)qede_vlan_stripping(eth_dev
, 1);
388 (void)qede_vlan_stripping(eth_dev
, 0);
391 if (mask
& ETH_VLAN_FILTER_MASK
) {
392 /* VLAN filtering kicks in when a VLAN is added */
393 if (rxmode
->hw_vlan_filter
) {
394 qede_vlan_filter_set(eth_dev
, 0, 1);
396 if (qdev
->configured_vlans
> 1) { /* Excluding VLAN0 */
397 DP_NOTICE(edev
, false,
398 " Please remove existing VLAN filters"
399 " before disabling VLAN filtering\n");
400 /* Signal app that VLAN filtering is still
403 rxmode
->hw_vlan_filter
= true;
405 qede_vlan_filter_set(eth_dev
, 0, 0);
410 if (mask
& ETH_VLAN_EXTEND_MASK
)
411 DP_INFO(edev
, "No offloads are supported with VLAN Q-in-Q"
412 " and classification is based on outer tag only\n");
414 DP_INFO(edev
, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
415 mask
, rxmode
->hw_vlan_strip
, rxmode
->hw_vlan_filter
);
418 static int qede_set_ucast_rx_vlan(struct qede_dev
*qdev
,
419 enum qed_filter_xcast_params_type opcode
,
422 struct qed_filter_params filter_cmd
;
423 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
425 memset(&filter_cmd
, 0, sizeof(filter_cmd
));
426 filter_cmd
.type
= QED_FILTER_TYPE_UCAST
;
427 filter_cmd
.filter
.ucast
.type
= opcode
;
428 filter_cmd
.filter
.ucast
.vlan_valid
= 1;
429 filter_cmd
.filter
.ucast
.vlan
= vid
;
431 return qdev
->ops
->filter_config(edev
, &filter_cmd
);
434 static int qede_vlan_filter_set(struct rte_eth_dev
*eth_dev
,
435 uint16_t vlan_id
, int on
)
437 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
438 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
439 struct qed_dev_eth_info
*dev_info
= &qdev
->dev_info
;
440 struct qede_vlan_entry
*tmp
= NULL
;
441 struct qede_vlan_entry
*vlan
;
445 if (qdev
->configured_vlans
== dev_info
->num_vlan_filters
) {
446 DP_INFO(edev
, "Reached max VLAN filter limit"
447 " enabling accept_any_vlan\n");
448 qede_config_accept_any_vlan(qdev
, true);
452 SLIST_FOREACH(tmp
, &qdev
->vlan_list_head
, list
) {
453 if (tmp
->vid
== vlan_id
) {
454 DP_ERR(edev
, "VLAN %u already configured\n",
460 vlan
= rte_malloc(NULL
, sizeof(struct qede_vlan_entry
),
461 RTE_CACHE_LINE_SIZE
);
464 DP_ERR(edev
, "Did not allocate memory for VLAN\n");
468 rc
= qede_set_ucast_rx_vlan(qdev
, QED_FILTER_XCAST_TYPE_ADD
,
471 DP_ERR(edev
, "Failed to add VLAN %u rc %d\n", vlan_id
,
476 SLIST_INSERT_HEAD(&qdev
->vlan_list_head
, vlan
, list
);
477 qdev
->configured_vlans
++;
478 DP_INFO(edev
, "VLAN %u added, configured_vlans %u\n",
479 vlan_id
, qdev
->configured_vlans
);
482 SLIST_FOREACH(tmp
, &qdev
->vlan_list_head
, list
) {
483 if (tmp
->vid
== vlan_id
)
488 if (qdev
->configured_vlans
== 0) {
490 "No VLAN filters configured yet\n");
494 DP_ERR(edev
, "VLAN %u not configured\n", vlan_id
);
498 SLIST_REMOVE(&qdev
->vlan_list_head
, tmp
, qede_vlan_entry
, list
);
500 rc
= qede_set_ucast_rx_vlan(qdev
, QED_FILTER_XCAST_TYPE_DEL
,
503 DP_ERR(edev
, "Failed to delete VLAN %u rc %d\n",
506 qdev
->configured_vlans
--;
507 DP_INFO(edev
, "VLAN %u removed configured_vlans %u\n",
508 vlan_id
, qdev
->configured_vlans
);
515 static int qede_init_vport(struct qede_dev
*qdev
)
517 struct ecore_dev
*edev
= &qdev
->edev
;
518 struct qed_start_vport_params start
= {0};
521 start
.remove_inner_vlan
= 1;
522 start
.gro_enable
= 0;
523 start
.mtu
= ETHER_MTU
+ QEDE_ETH_OVERHEAD
;
525 start
.drop_ttl0
= false;
526 start
.clear_stats
= 1;
527 start
.handle_ptp_pkts
= 0;
529 rc
= qdev
->ops
->vport_start(edev
, &start
);
531 DP_ERR(edev
, "Start V-PORT failed %d\n", rc
);
536 "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
537 start
.vport_id
, ETHER_MTU
);
542 static int qede_dev_configure(struct rte_eth_dev
*eth_dev
)
544 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
545 struct ecore_dev
*edev
= &qdev
->edev
;
546 struct rte_eth_rxmode
*rxmode
= ð_dev
->data
->dev_conf
.rxmode
;
549 PMD_INIT_FUNC_TRACE(edev
);
551 /* Check requirements for 100G mode */
552 if (edev
->num_hwfns
> 1) {
553 if (eth_dev
->data
->nb_rx_queues
< 2 ||
554 eth_dev
->data
->nb_tx_queues
< 2) {
555 DP_NOTICE(edev
, false,
556 "100G mode needs min. 2 RX/TX queues\n");
560 if ((eth_dev
->data
->nb_rx_queues
% 2 != 0) ||
561 (eth_dev
->data
->nb_tx_queues
% 2 != 0)) {
562 DP_NOTICE(edev
, false,
563 "100G mode needs even no. of RX/TX queues\n");
568 /* Sanity checks and throw warnings */
569 if (rxmode
->enable_scatter
== 1)
570 eth_dev
->data
->scattered_rx
= 1;
572 if (rxmode
->enable_lro
== 1) {
573 DP_INFO(edev
, "LRO is not supported\n");
577 if (!rxmode
->hw_strip_crc
)
578 DP_INFO(edev
, "L2 CRC stripping is always enabled in hw\n");
580 if (!rxmode
->hw_ip_checksum
)
581 DP_INFO(edev
, "IP/UDP/TCP checksum offload is always enabled "
584 /* Check for the port restart case */
585 if (qdev
->state
!= QEDE_DEV_INIT
) {
586 rc
= qdev
->ops
->vport_stop(edev
, 0);
589 qede_dealloc_fp_resc(eth_dev
);
592 qdev
->fp_num_tx
= eth_dev
->data
->nb_tx_queues
;
593 qdev
->fp_num_rx
= eth_dev
->data
->nb_rx_queues
;
594 qdev
->num_queues
= qdev
->fp_num_tx
+ qdev
->fp_num_rx
;
596 /* Fastpath status block should be initialized before sending
597 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
599 rc
= qede_alloc_fp_resc(qdev
);
603 /* Issue VPORT-START with default config values to allow
604 * other port configurations early on.
606 rc
= qede_init_vport(qdev
);
610 SLIST_INIT(&qdev
->vlan_list_head
);
612 /* Add primary mac for PF */
614 qede_mac_addr_set(eth_dev
, &qdev
->primary_mac
);
616 /* Enable VLAN offloads by default */
617 qede_vlan_offload_set(eth_dev
, ETH_VLAN_STRIP_MASK
|
618 ETH_VLAN_FILTER_MASK
|
619 ETH_VLAN_EXTEND_MASK
);
621 qdev
->state
= QEDE_DEV_CONFIG
;
623 DP_INFO(edev
, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
624 (int)QEDE_RSS_COUNT(qdev
), (int)QEDE_TSS_COUNT(qdev
),
630 /* Info about HW descriptor ring limitations */
631 static const struct rte_eth_desc_lim qede_rx_desc_lim
= {
632 .nb_max
= NUM_RX_BDS_MAX
,
634 .nb_align
= 128 /* lowest common multiple */
637 static const struct rte_eth_desc_lim qede_tx_desc_lim
= {
638 .nb_max
= NUM_TX_BDS_MAX
,
644 qede_dev_info_get(struct rte_eth_dev
*eth_dev
,
645 struct rte_eth_dev_info
*dev_info
)
647 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
648 struct ecore_dev
*edev
= &qdev
->edev
;
649 struct qed_link_output link
;
650 uint32_t speed_cap
= 0;
652 PMD_INIT_FUNC_TRACE(edev
);
654 dev_info
->min_rx_bufsize
= (uint32_t)(ETHER_MIN_MTU
+
656 dev_info
->max_rx_pktlen
= (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN
;
657 dev_info
->rx_desc_lim
= qede_rx_desc_lim
;
658 dev_info
->tx_desc_lim
= qede_tx_desc_lim
;
659 dev_info
->max_rx_queues
= (uint16_t)QEDE_MAX_RSS_CNT(qdev
);
660 dev_info
->max_tx_queues
= dev_info
->max_rx_queues
;
661 dev_info
->max_mac_addrs
= qdev
->dev_info
.num_mac_addrs
;
663 dev_info
->max_vfs
= 0;
665 dev_info
->max_vfs
= (uint16_t)NUM_OF_VFS(&qdev
->edev
);
666 dev_info
->driver_name
= qdev
->drv_ver
;
667 dev_info
->reta_size
= ECORE_RSS_IND_TABLE_SIZE
;
668 dev_info
->flow_type_rss_offloads
= (uint64_t)QEDE_RSS_OFFLOAD_ALL
;
670 dev_info
->default_txconf
= (struct rte_eth_txconf
) {
671 .txq_flags
= QEDE_TXQ_FLAGS
,
674 dev_info
->rx_offload_capa
= (DEV_RX_OFFLOAD_VLAN_STRIP
|
675 DEV_RX_OFFLOAD_IPV4_CKSUM
|
676 DEV_RX_OFFLOAD_UDP_CKSUM
|
677 DEV_RX_OFFLOAD_TCP_CKSUM
);
678 dev_info
->tx_offload_capa
= (DEV_TX_OFFLOAD_VLAN_INSERT
|
679 DEV_TX_OFFLOAD_IPV4_CKSUM
|
680 DEV_TX_OFFLOAD_UDP_CKSUM
|
681 DEV_TX_OFFLOAD_TCP_CKSUM
);
683 memset(&link
, 0, sizeof(struct qed_link_output
));
684 qdev
->ops
->common
->get_link(edev
, &link
);
685 if (link
.adv_speed
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
)
686 speed_cap
|= ETH_LINK_SPEED_1G
;
687 if (link
.adv_speed
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
)
688 speed_cap
|= ETH_LINK_SPEED_10G
;
689 if (link
.adv_speed
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
)
690 speed_cap
|= ETH_LINK_SPEED_25G
;
691 if (link
.adv_speed
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
)
692 speed_cap
|= ETH_LINK_SPEED_40G
;
693 if (link
.adv_speed
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G
)
694 speed_cap
|= ETH_LINK_SPEED_50G
;
695 if (link
.adv_speed
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
)
696 speed_cap
|= ETH_LINK_SPEED_100G
;
697 dev_info
->speed_capa
= speed_cap
;
700 /* return 0 means link status changed, -1 means not changed */
702 qede_link_update(struct rte_eth_dev
*eth_dev
, __rte_unused
int wait_to_complete
)
704 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
705 struct ecore_dev
*edev
= &qdev
->edev
;
706 uint16_t link_duplex
;
707 struct qed_link_output link
;
708 struct rte_eth_link
*curr
= ð_dev
->data
->dev_link
;
710 memset(&link
, 0, sizeof(struct qed_link_output
));
711 qdev
->ops
->common
->get_link(edev
, &link
);
714 curr
->link_speed
= link
.speed
;
717 switch (link
.duplex
) {
718 case QEDE_DUPLEX_HALF
:
719 link_duplex
= ETH_LINK_HALF_DUPLEX
;
721 case QEDE_DUPLEX_FULL
:
722 link_duplex
= ETH_LINK_FULL_DUPLEX
;
724 case QEDE_DUPLEX_UNKNOWN
:
728 curr
->link_duplex
= link_duplex
;
731 curr
->link_status
= (link
.link_up
) ? ETH_LINK_UP
: ETH_LINK_DOWN
;
734 curr
->link_autoneg
= (link
.supported_caps
& QEDE_SUPPORTED_AUTONEG
) ?
735 ETH_LINK_AUTONEG
: ETH_LINK_FIXED
;
737 DP_INFO(edev
, "Link - Speed %u Mode %u AN %u Status %u\n",
738 curr
->link_speed
, curr
->link_duplex
,
739 curr
->link_autoneg
, curr
->link_status
);
741 /* return 0 means link status changed, -1 means not changed */
742 return ((curr
->link_status
== link
.link_up
) ? -1 : 0);
746 qede_rx_mode_setting(struct rte_eth_dev
*eth_dev
,
747 enum qed_filter_rx_mode_type accept_flags
)
749 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
750 struct ecore_dev
*edev
= &qdev
->edev
;
751 struct qed_filter_params rx_mode
;
753 DP_INFO(edev
, "%s mode %u\n", __func__
, accept_flags
);
755 memset(&rx_mode
, 0, sizeof(struct qed_filter_params
));
756 rx_mode
.type
= QED_FILTER_TYPE_RX_MODE
;
757 rx_mode
.filter
.accept_flags
= accept_flags
;
758 qdev
->ops
->filter_config(edev
, &rx_mode
);
761 static void qede_promiscuous_enable(struct rte_eth_dev
*eth_dev
)
763 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
764 struct ecore_dev
*edev
= &qdev
->edev
;
766 PMD_INIT_FUNC_TRACE(edev
);
768 enum qed_filter_rx_mode_type type
= QED_FILTER_RX_MODE_TYPE_PROMISC
;
770 if (rte_eth_allmulticast_get(eth_dev
->data
->port_id
) == 1)
771 type
|= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC
;
773 qede_rx_mode_setting(eth_dev
, type
);
/* Disable promiscuous Rx mode, falling back to multi-promisc if
 * all-multicast is still enabled, otherwise to regular filtering.
 * NOTE(review): the embedded numbering jumps from 785 to 787 below —
 * an "else" line appears to have been lost between the two
 * qede_rx_mode_setting() calls in this copy; as written they would
 * both execute. Recover the branch from the upstream driver.
 */
776 static void qede_promiscuous_disable(struct rte_eth_dev
*eth_dev
)
778 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
779 struct ecore_dev
*edev
= &qdev
->edev
;
781 PMD_INIT_FUNC_TRACE(edev
);
783 if (rte_eth_allmulticast_get(eth_dev
->data
->port_id
) == 1)
784 qede_rx_mode_setting(eth_dev
,
785 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC
);
787 qede_rx_mode_setting(eth_dev
, QED_FILTER_RX_MODE_TYPE_REGULAR
);
790 static void qede_poll_sp_sb_cb(void *param
)
792 struct rte_eth_dev
*eth_dev
= (struct rte_eth_dev
*)param
;
793 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
794 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
797 qede_interrupt_action(ECORE_LEADING_HWFN(edev
));
798 qede_interrupt_action(&edev
->hwfns
[1]);
800 rc
= rte_eal_alarm_set(timer_period
* US_PER_S
,
804 DP_ERR(edev
, "Unable to start periodic"
805 " timer rc %d\n", rc
);
806 assert(false && "Unable to start periodic timer");
810 static void qede_dev_close(struct rte_eth_dev
*eth_dev
)
812 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
813 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
816 PMD_INIT_FUNC_TRACE(edev
);
818 /* dev_stop() shall cleanup fp resources in hw but without releasing
819 * dma memories and sw structures so that dev_start() can be called
820 * by the app without reconfiguration. However, in dev_close() we
821 * can release all the resources and device can be brought up newly
823 if (qdev
->state
!= QEDE_DEV_STOP
)
824 qede_dev_stop(eth_dev
);
826 DP_INFO(edev
, "Device is already stopped\n");
828 rc
= qdev
->ops
->vport_stop(edev
, 0);
830 DP_ERR(edev
, "Failed to stop VPORT\n");
832 qede_dealloc_fp_resc(eth_dev
);
834 qdev
->ops
->common
->slowpath_stop(edev
);
836 qdev
->ops
->common
->remove(edev
);
838 rte_intr_disable(ð_dev
->pci_dev
->intr_handle
);
840 rte_intr_callback_unregister(ð_dev
->pci_dev
->intr_handle
,
841 qede_interrupt_handler
, (void *)eth_dev
);
843 if (edev
->num_hwfns
> 1)
844 rte_eal_alarm_cancel(qede_poll_sp_sb_cb
, (void *)eth_dev
);
846 qdev
->state
= QEDE_DEV_INIT
; /* Go back to init state */
850 qede_get_stats(struct rte_eth_dev
*eth_dev
, struct rte_eth_stats
*eth_stats
)
852 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
853 struct ecore_dev
*edev
= &qdev
->edev
;
854 struct ecore_eth_stats stats
;
855 unsigned int i
= 0, j
= 0, qid
;
856 struct qede_tx_queue
*txq
;
858 qdev
->ops
->get_vport_stats(edev
, &stats
);
861 eth_stats
->ipackets
= stats
.rx_ucast_pkts
+
862 stats
.rx_mcast_pkts
+ stats
.rx_bcast_pkts
;
864 eth_stats
->ibytes
= stats
.rx_ucast_bytes
+
865 stats
.rx_mcast_bytes
+ stats
.rx_bcast_bytes
;
867 eth_stats
->ierrors
= stats
.rx_crc_errors
+
868 stats
.rx_align_errors
+
869 stats
.rx_carrier_errors
+
870 stats
.rx_oversize_packets
+
871 stats
.rx_jabbers
+ stats
.rx_undersize_packets
;
873 eth_stats
->rx_nombuf
= stats
.no_buff_discards
;
875 eth_stats
->imissed
= stats
.mftag_filter_discards
+
876 stats
.mac_filter_discards
+
877 stats
.no_buff_discards
+ stats
.brb_truncates
+ stats
.brb_discards
;
880 eth_stats
->opackets
= stats
.tx_ucast_pkts
+
881 stats
.tx_mcast_pkts
+ stats
.tx_bcast_pkts
;
883 eth_stats
->obytes
= stats
.tx_ucast_bytes
+
884 stats
.tx_mcast_bytes
+ stats
.tx_bcast_bytes
;
886 eth_stats
->oerrors
= stats
.tx_err_drop_pkts
;
889 for (qid
= 0; qid
< QEDE_QUEUE_CNT(qdev
); qid
++) {
890 if (qdev
->fp_array
[qid
].type
& QEDE_FASTPATH_RX
) {
891 eth_stats
->q_ipackets
[i
] =
893 ((char *)(qdev
->fp_array
[(qid
)].rxq
)) +
894 offsetof(struct qede_rx_queue
,
896 eth_stats
->q_errors
[i
] =
898 ((char *)(qdev
->fp_array
[(qid
)].rxq
)) +
899 offsetof(struct qede_rx_queue
,
902 ((char *)(qdev
->fp_array
[(qid
)].rxq
)) +
903 offsetof(struct qede_rx_queue
,
908 if (qdev
->fp_array
[qid
].type
& QEDE_FASTPATH_TX
) {
909 txq
= qdev
->fp_array
[(qid
)].txqs
[0];
910 eth_stats
->q_opackets
[j
] =
911 *((uint64_t *)(uintptr_t)
912 (((uint64_t)(uintptr_t)(txq
)) +
913 offsetof(struct qede_tx_queue
,
921 qede_get_xstats_count(struct qede_dev
*qdev
) {
922 return RTE_DIM(qede_xstats_strings
) +
923 (RTE_DIM(qede_rxq_xstats_strings
) * QEDE_RSS_COUNT(qdev
));
927 qede_get_xstats_names(__rte_unused
struct rte_eth_dev
*dev
,
928 struct rte_eth_xstat_name
*xstats_names
, unsigned limit
)
930 struct qede_dev
*qdev
= dev
->data
->dev_private
;
931 const unsigned int stat_cnt
= qede_get_xstats_count(qdev
);
932 unsigned int i
, qid
, stat_idx
= 0;
934 if (xstats_names
!= NULL
) {
935 for (i
= 0; i
< RTE_DIM(qede_xstats_strings
); i
++) {
936 snprintf(xstats_names
[stat_idx
].name
,
937 sizeof(xstats_names
[stat_idx
].name
),
939 qede_xstats_strings
[i
].name
);
943 for (qid
= 0; qid
< QEDE_RSS_COUNT(qdev
); qid
++) {
944 for (i
= 0; i
< RTE_DIM(qede_rxq_xstats_strings
); i
++) {
945 snprintf(xstats_names
[stat_idx
].name
,
946 sizeof(xstats_names
[stat_idx
].name
),
948 qede_rxq_xstats_strings
[i
].name
, qid
,
949 qede_rxq_xstats_strings
[i
].name
+ 4);
959 qede_get_xstats(struct rte_eth_dev
*dev
, struct rte_eth_xstat
*xstats
,
962 struct qede_dev
*qdev
= dev
->data
->dev_private
;
963 struct ecore_dev
*edev
= &qdev
->edev
;
964 struct ecore_eth_stats stats
;
965 const unsigned int num
= qede_get_xstats_count(qdev
);
966 unsigned int i
, qid
, stat_idx
= 0;
971 qdev
->ops
->get_vport_stats(edev
, &stats
);
973 for (i
= 0; i
< RTE_DIM(qede_xstats_strings
); i
++) {
974 xstats
[stat_idx
].value
= *(uint64_t *)(((char *)&stats
) +
975 qede_xstats_strings
[i
].offset
);
979 for (qid
= 0; qid
< QEDE_QUEUE_CNT(qdev
); qid
++) {
980 if (qdev
->fp_array
[qid
].type
& QEDE_FASTPATH_RX
) {
981 for (i
= 0; i
< RTE_DIM(qede_rxq_xstats_strings
); i
++) {
982 xstats
[stat_idx
].value
= *(uint64_t *)(
983 ((char *)(qdev
->fp_array
[(qid
)].rxq
)) +
984 qede_rxq_xstats_strings
[i
].offset
);
994 qede_reset_xstats(struct rte_eth_dev
*dev
)
996 struct qede_dev
*qdev
= dev
->data
->dev_private
;
997 struct ecore_dev
*edev
= &qdev
->edev
;
999 ecore_reset_vport_stats(edev
);
1002 int qede_dev_set_link_state(struct rte_eth_dev
*eth_dev
, bool link_up
)
1004 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
1005 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
1006 struct qed_link_params link_params
;
1009 DP_INFO(edev
, "setting link state %d\n", link_up
);
1010 memset(&link_params
, 0, sizeof(link_params
));
1011 link_params
.link_up
= link_up
;
1012 rc
= qdev
->ops
->common
->set_link(edev
, &link_params
);
1013 if (rc
!= ECORE_SUCCESS
)
1014 DP_ERR(edev
, "Unable to set link state %d\n", link_up
);
1019 static int qede_dev_set_link_up(struct rte_eth_dev
*eth_dev
)
1021 return qede_dev_set_link_state(eth_dev
, true);
1024 static int qede_dev_set_link_down(struct rte_eth_dev
*eth_dev
)
1026 return qede_dev_set_link_state(eth_dev
, false);
1029 static void qede_reset_stats(struct rte_eth_dev
*eth_dev
)
1031 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
1032 struct ecore_dev
*edev
= &qdev
->edev
;
1034 ecore_reset_vport_stats(edev
);
1037 static void qede_allmulticast_enable(struct rte_eth_dev
*eth_dev
)
1039 enum qed_filter_rx_mode_type type
=
1040 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC
;
1042 if (rte_eth_promiscuous_get(eth_dev
->data
->port_id
) == 1)
1043 type
|= QED_FILTER_RX_MODE_TYPE_PROMISC
;
1045 qede_rx_mode_setting(eth_dev
, type
);
1048 static void qede_allmulticast_disable(struct rte_eth_dev
*eth_dev
)
1050 if (rte_eth_promiscuous_get(eth_dev
->data
->port_id
) == 1)
1051 qede_rx_mode_setting(eth_dev
, QED_FILTER_RX_MODE_TYPE_PROMISC
);
1053 qede_rx_mode_setting(eth_dev
, QED_FILTER_RX_MODE_TYPE_REGULAR
);
1056 static int qede_flow_ctrl_set(struct rte_eth_dev
*eth_dev
,
1057 struct rte_eth_fc_conf
*fc_conf
)
1059 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
1060 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
1061 struct qed_link_output current_link
;
1062 struct qed_link_params params
;
1064 memset(¤t_link
, 0, sizeof(current_link
));
1065 qdev
->ops
->common
->get_link(edev
, ¤t_link
);
1067 memset(¶ms
, 0, sizeof(params
));
1068 params
.override_flags
|= QED_LINK_OVERRIDE_PAUSE_CONFIG
;
1069 if (fc_conf
->autoneg
) {
1070 if (!(current_link
.supported_caps
& QEDE_SUPPORTED_AUTONEG
)) {
1071 DP_ERR(edev
, "Autoneg not supported\n");
1074 params
.pause_config
|= QED_LINK_PAUSE_AUTONEG_ENABLE
;
1077 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1078 if (fc_conf
->mode
== RTE_FC_FULL
)
1079 params
.pause_config
|= (QED_LINK_PAUSE_TX_ENABLE
|
1080 QED_LINK_PAUSE_RX_ENABLE
);
1081 if (fc_conf
->mode
== RTE_FC_TX_PAUSE
)
1082 params
.pause_config
|= QED_LINK_PAUSE_TX_ENABLE
;
1083 if (fc_conf
->mode
== RTE_FC_RX_PAUSE
)
1084 params
.pause_config
|= QED_LINK_PAUSE_RX_ENABLE
;
1086 params
.link_up
= true;
1087 (void)qdev
->ops
->common
->set_link(edev
, ¶ms
);
1092 static int qede_flow_ctrl_get(struct rte_eth_dev
*eth_dev
,
1093 struct rte_eth_fc_conf
*fc_conf
)
1095 struct qede_dev
*qdev
= QEDE_INIT_QDEV(eth_dev
);
1096 struct ecore_dev
*edev
= QEDE_INIT_EDEV(qdev
);
1097 struct qed_link_output current_link
;
1099 memset(¤t_link
, 0, sizeof(current_link
));
1100 qdev
->ops
->common
->get_link(edev
, ¤t_link
);
1102 if (current_link
.pause_config
& QED_LINK_PAUSE_AUTONEG_ENABLE
)
1103 fc_conf
->autoneg
= true;
1105 if (current_link
.pause_config
& (QED_LINK_PAUSE_RX_ENABLE
|
1106 QED_LINK_PAUSE_TX_ENABLE
))
1107 fc_conf
->mode
= RTE_FC_FULL
;
1108 else if (current_link
.pause_config
& QED_LINK_PAUSE_RX_ENABLE
)
1109 fc_conf
->mode
= RTE_FC_RX_PAUSE
;
1110 else if (current_link
.pause_config
& QED_LINK_PAUSE_TX_ENABLE
)
1111 fc_conf
->mode
= RTE_FC_TX_PAUSE
;
1113 fc_conf
->mode
= RTE_FC_NONE
;
1118 static const uint32_t *
1119 qede_dev_supported_ptypes_get(struct rte_eth_dev
*eth_dev
)
1121 static const uint32_t ptypes
[] = {
1127 if (eth_dev
->rx_pkt_burst
== qede_recv_pkts
)
1133 void qede_init_rss_caps(uint8_t *rss_caps
, uint64_t hf
)
1136 *rss_caps
|= (hf
& ETH_RSS_IPV4
) ? ECORE_RSS_IPV4
: 0;
1137 *rss_caps
|= (hf
& ETH_RSS_IPV6
) ? ECORE_RSS_IPV6
: 0;
1138 *rss_caps
|= (hf
& ETH_RSS_IPV6_EX
) ? ECORE_RSS_IPV6
: 0;
1139 *rss_caps
|= (hf
& ETH_RSS_NONFRAG_IPV4_TCP
) ? ECORE_RSS_IPV4_TCP
: 0;
1140 *rss_caps
|= (hf
& ETH_RSS_NONFRAG_IPV6_TCP
) ? ECORE_RSS_IPV6_TCP
: 0;
1141 *rss_caps
|= (hf
& ETH_RSS_IPV6_TCP_EX
) ? ECORE_RSS_IPV6_TCP
: 0;
1144 static int qede_rss_hash_update(struct rte_eth_dev
*eth_dev
,
1145 struct rte_eth_rss_conf
*rss_conf
)
1147 struct qed_update_vport_params vport_update_params
;
1148 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
1149 struct ecore_dev
*edev
= &qdev
->edev
;
1150 uint32_t *key
= (uint32_t *)rss_conf
->rss_key
;
1151 uint64_t hf
= rss_conf
->rss_hf
;
1154 memset(&vport_update_params
, 0, sizeof(vport_update_params
));
1158 qede_init_rss_caps(&qdev
->rss_params
.rss_caps
, hf
);
1159 memcpy(&vport_update_params
.rss_params
, &qdev
->rss_params
,
1160 sizeof(vport_update_params
.rss_params
));
1162 memcpy(qdev
->rss_params
.rss_key
, rss_conf
->rss_key
,
1163 rss_conf
->rss_key_len
);
1164 vport_update_params
.update_rss_flg
= 1;
1165 qdev
->rss_enabled
= 1;
1168 qdev
->rss_enabled
= 0;
1171 /* If the mapping doesn't fit any supported, return */
1172 if (qdev
->rss_params
.rss_caps
== 0 && hf
!= 0)
1175 DP_INFO(edev
, "%s\n", (vport_update_params
.update_rss_flg
) ?
1176 "Enabling RSS" : "Disabling RSS");
1178 vport_update_params
.vport_id
= 0;
1180 return qdev
->ops
->vport_update(edev
, &vport_update_params
);
1183 int qede_rss_hash_conf_get(struct rte_eth_dev
*eth_dev
,
1184 struct rte_eth_rss_conf
*rss_conf
)
1186 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
1189 if (rss_conf
->rss_key_len
< sizeof(qdev
->rss_params
.rss_key
))
1192 if (rss_conf
->rss_key
)
1193 memcpy(rss_conf
->rss_key
, qdev
->rss_params
.rss_key
,
1194 sizeof(qdev
->rss_params
.rss_key
));
1197 hf
|= (qdev
->rss_params
.rss_caps
& ECORE_RSS_IPV4
) ?
1199 hf
|= (qdev
->rss_params
.rss_caps
& ECORE_RSS_IPV6
) ?
1201 hf
|= (qdev
->rss_params
.rss_caps
& ECORE_RSS_IPV6
) ?
1202 ETH_RSS_IPV6_EX
: 0;
1203 hf
|= (qdev
->rss_params
.rss_caps
& ECORE_RSS_IPV4_TCP
) ?
1204 ETH_RSS_NONFRAG_IPV4_TCP
: 0;
1205 hf
|= (qdev
->rss_params
.rss_caps
& ECORE_RSS_IPV6_TCP
) ?
1206 ETH_RSS_NONFRAG_IPV6_TCP
: 0;
1207 hf
|= (qdev
->rss_params
.rss_caps
& ECORE_RSS_IPV6_TCP
) ?
1208 ETH_RSS_IPV6_TCP_EX
: 0;
1210 rss_conf
->rss_hf
= hf
;
1215 static int qede_rss_reta_update(struct rte_eth_dev
*eth_dev
,
1216 struct rte_eth_rss_reta_entry64
*reta_conf
,
1219 struct qed_update_vport_params vport_update_params
;
1220 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
1221 struct ecore_dev
*edev
= &qdev
->edev
;
1222 uint16_t i
, idx
, shift
;
1224 if (reta_size
> ETH_RSS_RETA_SIZE_128
) {
1225 DP_ERR(edev
, "reta_size %d is not supported by hardware\n",
1230 memset(&vport_update_params
, 0, sizeof(vport_update_params
));
1231 memcpy(&vport_update_params
.rss_params
, &qdev
->rss_params
,
1232 sizeof(vport_update_params
.rss_params
));
1234 for (i
= 0; i
< reta_size
; i
++) {
1235 idx
= i
/ RTE_RETA_GROUP_SIZE
;
1236 shift
= i
% RTE_RETA_GROUP_SIZE
;
1237 if (reta_conf
[idx
].mask
& (1ULL << shift
)) {
1238 uint8_t entry
= reta_conf
[idx
].reta
[shift
];
1239 qdev
->rss_params
.rss_ind_table
[i
] = entry
;
1243 vport_update_params
.update_rss_flg
= 1;
1244 vport_update_params
.vport_id
= 0;
1246 return qdev
->ops
->vport_update(edev
, &vport_update_params
);
1249 int qede_rss_reta_query(struct rte_eth_dev
*eth_dev
,
1250 struct rte_eth_rss_reta_entry64
*reta_conf
,
1253 struct qede_dev
*qdev
= eth_dev
->data
->dev_private
;
1254 uint16_t i
, idx
, shift
;
1256 if (reta_size
> ETH_RSS_RETA_SIZE_128
) {
1257 struct ecore_dev
*edev
= &qdev
->edev
;
1258 DP_ERR(edev
, "reta_size %d is not supported\n",
1262 for (i
= 0; i
< reta_size
; i
++) {
1263 idx
= i
/ RTE_RETA_GROUP_SIZE
;
1264 shift
= i
% RTE_RETA_GROUP_SIZE
;
1265 if (reta_conf
[idx
].mask
& (1ULL << shift
)) {
1266 uint8_t entry
= qdev
->rss_params
.rss_ind_table
[i
];
1267 reta_conf
[idx
].reta
[shift
] = entry
;
1274 int qede_set_mtu(struct rte_eth_dev
*dev
, uint16_t mtu
)
1276 uint32_t frame_size
;
1277 struct qede_dev
*qdev
= dev
->data
->dev_private
;
1278 struct rte_eth_dev_info dev_info
= {0};
1280 qede_dev_info_get(dev
, &dev_info
);
1283 frame_size
= mtu
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
+ 4;
1285 if ((mtu
< ETHER_MIN_MTU
) || (frame_size
> dev_info
.max_rx_pktlen
))
1288 if (!dev
->data
->scattered_rx
&&
1289 frame_size
> dev
->data
->min_rx_buf_size
- RTE_PKTMBUF_HEADROOM
)
1292 if (frame_size
> ETHER_MAX_LEN
)
1293 dev
->data
->dev_conf
.rxmode
.jumbo_frame
= 1;
1295 dev
->data
->dev_conf
.rxmode
.jumbo_frame
= 0;
1297 /* update max frame size */
1298 dev
->data
->dev_conf
.rxmode
.max_rx_pkt_len
= frame_size
;
1301 qede_dev_start(dev
);
1306 static const struct eth_dev_ops qede_eth_dev_ops
= {
1307 .dev_configure
= qede_dev_configure
,
1308 .dev_infos_get
= qede_dev_info_get
,
1309 .rx_queue_setup
= qede_rx_queue_setup
,
1310 .rx_queue_release
= qede_rx_queue_release
,
1311 .tx_queue_setup
= qede_tx_queue_setup
,
1312 .tx_queue_release
= qede_tx_queue_release
,
1313 .dev_start
= qede_dev_start
,
1314 .dev_set_link_up
= qede_dev_set_link_up
,
1315 .dev_set_link_down
= qede_dev_set_link_down
,
1316 .link_update
= qede_link_update
,
1317 .promiscuous_enable
= qede_promiscuous_enable
,
1318 .promiscuous_disable
= qede_promiscuous_disable
,
1319 .allmulticast_enable
= qede_allmulticast_enable
,
1320 .allmulticast_disable
= qede_allmulticast_disable
,
1321 .dev_stop
= qede_dev_stop
,
1322 .dev_close
= qede_dev_close
,
1323 .stats_get
= qede_get_stats
,
1324 .stats_reset
= qede_reset_stats
,
1325 .xstats_get
= qede_get_xstats
,
1326 .xstats_reset
= qede_reset_xstats
,
1327 .xstats_get_names
= qede_get_xstats_names
,
1328 .mac_addr_add
= qede_mac_addr_add
,
1329 .mac_addr_remove
= qede_mac_addr_remove
,
1330 .mac_addr_set
= qede_mac_addr_set
,
1331 .vlan_offload_set
= qede_vlan_offload_set
,
1332 .vlan_filter_set
= qede_vlan_filter_set
,
1333 .flow_ctrl_set
= qede_flow_ctrl_set
,
1334 .flow_ctrl_get
= qede_flow_ctrl_get
,
1335 .dev_supported_ptypes_get
= qede_dev_supported_ptypes_get
,
1336 .rss_hash_update
= qede_rss_hash_update
,
1337 .rss_hash_conf_get
= qede_rss_hash_conf_get
,
1338 .reta_update
= qede_rss_reta_update
,
1339 .reta_query
= qede_rss_reta_query
,
1340 .mtu_set
= qede_set_mtu
,
1343 static const struct eth_dev_ops qede_eth_vf_dev_ops
= {
1344 .dev_configure
= qede_dev_configure
,
1345 .dev_infos_get
= qede_dev_info_get
,
1346 .rx_queue_setup
= qede_rx_queue_setup
,
1347 .rx_queue_release
= qede_rx_queue_release
,
1348 .tx_queue_setup
= qede_tx_queue_setup
,
1349 .tx_queue_release
= qede_tx_queue_release
,
1350 .dev_start
= qede_dev_start
,
1351 .dev_set_link_up
= qede_dev_set_link_up
,
1352 .dev_set_link_down
= qede_dev_set_link_down
,
1353 .link_update
= qede_link_update
,
1354 .promiscuous_enable
= qede_promiscuous_enable
,
1355 .promiscuous_disable
= qede_promiscuous_disable
,
1356 .allmulticast_enable
= qede_allmulticast_enable
,
1357 .allmulticast_disable
= qede_allmulticast_disable
,
1358 .dev_stop
= qede_dev_stop
,
1359 .dev_close
= qede_dev_close
,
1360 .stats_get
= qede_get_stats
,
1361 .stats_reset
= qede_reset_stats
,
1362 .xstats_get
= qede_get_xstats
,
1363 .xstats_reset
= qede_reset_xstats
,
1364 .xstats_get_names
= qede_get_xstats_names
,
1365 .vlan_offload_set
= qede_vlan_offload_set
,
1366 .vlan_filter_set
= qede_vlan_filter_set
,
1367 .dev_supported_ptypes_get
= qede_dev_supported_ptypes_get
,
1368 .rss_hash_update
= qede_rss_hash_update
,
1369 .rss_hash_conf_get
= qede_rss_hash_conf_get
,
1370 .reta_update
= qede_rss_reta_update
,
1371 .reta_query
= qede_rss_reta_query
,
1372 .mtu_set
= qede_set_mtu
,
1375 static void qede_update_pf_params(struct ecore_dev
*edev
)
1377 struct ecore_pf_params pf_params
;
1379 memset(&pf_params
, 0, sizeof(struct ecore_pf_params
));
1380 pf_params
.eth_pf_params
.num_cons
= 64;
1381 qed_ops
->common
->update_pf_params(edev
, &pf_params
);
1384 static int qede_common_dev_init(struct rte_eth_dev
*eth_dev
, bool is_vf
)
1386 struct rte_pci_device
*pci_dev
;
1387 struct rte_pci_addr pci_addr
;
1388 struct qede_dev
*adapter
;
1389 struct ecore_dev
*edev
;
1390 struct qed_dev_eth_info dev_info
;
1391 struct qed_slowpath_params params
;
1392 static bool do_once
= true;
1393 uint8_t bulletin_change
;
1394 uint8_t vf_mac
[ETHER_ADDR_LEN
];
1395 uint8_t is_mac_forced
;
1397 /* Fix up ecore debug level */
1398 uint32_t dp_module
= ~0 & ~ECORE_MSG_HW
;
1399 uint8_t dp_level
= ECORE_LEVEL_VERBOSE
;
1400 uint32_t max_mac_addrs
;
1403 /* Extract key data structures */
1404 adapter
= eth_dev
->data
->dev_private
;
1405 edev
= &adapter
->edev
;
1406 pci_addr
= eth_dev
->pci_dev
->addr
;
1408 PMD_INIT_FUNC_TRACE(edev
);
1410 snprintf(edev
->name
, NAME_SIZE
, PCI_SHORT_PRI_FMT
":dpdk-port-%u",
1411 pci_addr
.bus
, pci_addr
.devid
, pci_addr
.function
,
1412 eth_dev
->data
->port_id
);
1414 eth_dev
->rx_pkt_burst
= qede_recv_pkts
;
1415 eth_dev
->tx_pkt_burst
= qede_xmit_pkts
;
1417 if (rte_eal_process_type() != RTE_PROC_PRIMARY
) {
1418 DP_NOTICE(edev
, false,
1419 "Skipping device init from secondary process\n");
1423 pci_dev
= eth_dev
->pci_dev
;
1425 rte_eth_copy_pci_info(eth_dev
, pci_dev
);
1427 qed_ops
= qed_get_eth_ops();
1429 DP_ERR(edev
, "Failed to get qed_eth_ops_pass\n");
1433 DP_INFO(edev
, "Starting qede probe\n");
1435 rc
= qed_ops
->common
->probe(edev
, pci_dev
, QED_PROTOCOL_ETH
,
1436 dp_module
, dp_level
, is_vf
);
1439 DP_ERR(edev
, "qede probe failed rc %d\n", rc
);
1443 qede_update_pf_params(edev
);
1445 rte_intr_callback_register(ð_dev
->pci_dev
->intr_handle
,
1446 qede_interrupt_handler
, (void *)eth_dev
);
1448 if (rte_intr_enable(ð_dev
->pci_dev
->intr_handle
)) {
1449 DP_ERR(edev
, "rte_intr_enable() failed\n");
1453 /* Start the Slowpath-process */
1454 memset(¶ms
, 0, sizeof(struct qed_slowpath_params
));
1455 params
.int_mode
= ECORE_INT_MODE_MSIX
;
1456 params
.drv_major
= QEDE_PMD_VERSION_MAJOR
;
1457 params
.drv_minor
= QEDE_PMD_VERSION_MINOR
;
1458 params
.drv_rev
= QEDE_PMD_VERSION_REVISION
;
1459 params
.drv_eng
= QEDE_PMD_VERSION_PATCH
;
1460 strncpy((char *)params
.name
, QEDE_PMD_VER_PREFIX
,
1461 QEDE_PMD_DRV_VER_STR_SIZE
);
1463 /* For CMT mode device do periodic polling for slowpath events.
1464 * This is required since uio device uses only one MSI-x
1465 * interrupt vector but we need one for each engine.
1467 if (edev
->num_hwfns
> 1 && IS_PF(edev
)) {
1468 rc
= rte_eal_alarm_set(timer_period
* US_PER_S
,
1472 DP_ERR(edev
, "Unable to start periodic"
1473 " timer rc %d\n", rc
);
1478 rc
= qed_ops
->common
->slowpath_start(edev
, ¶ms
);
1480 DP_ERR(edev
, "Cannot start slowpath rc = %d\n", rc
);
1481 rte_eal_alarm_cancel(qede_poll_sp_sb_cb
,
1486 rc
= qed_ops
->fill_dev_info(edev
, &dev_info
);
1488 DP_ERR(edev
, "Cannot get device_info rc %d\n", rc
);
1489 qed_ops
->common
->slowpath_stop(edev
);
1490 qed_ops
->common
->remove(edev
);
1491 rte_eal_alarm_cancel(qede_poll_sp_sb_cb
,
1496 qede_alloc_etherdev(adapter
, &dev_info
);
1498 adapter
->ops
->common
->set_id(edev
, edev
->name
, QEDE_PMD_VERSION
);
1501 adapter
->dev_info
.num_mac_addrs
=
1502 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev
),
1505 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev
),
1506 &adapter
->dev_info
.num_mac_addrs
);
1508 /* Allocate memory for storing MAC addr */
1509 eth_dev
->data
->mac_addrs
= rte_zmalloc(edev
->name
,
1511 adapter
->dev_info
.num_mac_addrs
),
1512 RTE_CACHE_LINE_SIZE
);
1514 if (eth_dev
->data
->mac_addrs
== NULL
) {
1515 DP_ERR(edev
, "Failed to allocate MAC address\n");
1516 qed_ops
->common
->slowpath_stop(edev
);
1517 qed_ops
->common
->remove(edev
);
1518 rte_eal_alarm_cancel(qede_poll_sp_sb_cb
,
1524 ether_addr_copy((struct ether_addr
*)edev
->hwfns
[0].
1525 hw_info
.hw_mac_addr
,
1526 ð_dev
->data
->mac_addrs
[0]);
1527 ether_addr_copy(ð_dev
->data
->mac_addrs
[0],
1528 &adapter
->primary_mac
);
1530 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev
),
1532 if (bulletin_change
) {
1534 ecore_vf_bulletin_get_forced_mac(
1535 ECORE_LEADING_HWFN(edev
),
1538 if (is_mac_exist
&& is_mac_forced
) {
1539 DP_INFO(edev
, "VF macaddr received from PF\n");
1540 ether_addr_copy((struct ether_addr
*)&vf_mac
,
1541 ð_dev
->data
->mac_addrs
[0]);
1542 ether_addr_copy(ð_dev
->data
->mac_addrs
[0],
1543 &adapter
->primary_mac
);
1545 DP_NOTICE(edev
, false,
1546 "No VF macaddr assigned\n");
1551 eth_dev
->dev_ops
= (is_vf
) ? &qede_eth_vf_dev_ops
: &qede_eth_dev_ops
;
1554 qede_print_adapter_info(adapter
);
1558 adapter
->state
= QEDE_DEV_INIT
;
1560 DP_NOTICE(edev
, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
1561 adapter
->primary_mac
.addr_bytes
[0],
1562 adapter
->primary_mac
.addr_bytes
[1],
1563 adapter
->primary_mac
.addr_bytes
[2],
1564 adapter
->primary_mac
.addr_bytes
[3],
1565 adapter
->primary_mac
.addr_bytes
[4],
1566 adapter
->primary_mac
.addr_bytes
[5]);
1571 static int qedevf_eth_dev_init(struct rte_eth_dev
*eth_dev
)
1573 return qede_common_dev_init(eth_dev
, 1);
1576 static int qede_eth_dev_init(struct rte_eth_dev
*eth_dev
)
1578 return qede_common_dev_init(eth_dev
, 0);
1581 static int qede_dev_common_uninit(struct rte_eth_dev
*eth_dev
)
1583 /* only uninitialize in the primary process */
1584 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
1587 /* safe to close dev here */
1588 qede_dev_close(eth_dev
);
1590 eth_dev
->dev_ops
= NULL
;
1591 eth_dev
->rx_pkt_burst
= NULL
;
1592 eth_dev
->tx_pkt_burst
= NULL
;
1594 if (eth_dev
->data
->mac_addrs
)
1595 rte_free(eth_dev
->data
->mac_addrs
);
1597 eth_dev
->data
->mac_addrs
= NULL
;
/* PF remove entry point: defer to the common teardown. */
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
/* VF remove entry point: defer to the common teardown. */
static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
1612 static struct rte_pci_id pci_id_qedevf_map
[] = {
1613 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1615 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF
)
1618 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV
)
1623 static struct rte_pci_id pci_id_qede_map
[] = {
1624 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1626 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E
)
1629 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S
)
1632 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40
)
1635 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25
)
1638 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100
)
1643 static struct eth_driver rte_qedevf_pmd
= {
1645 .id_table
= pci_id_qedevf_map
,
1647 RTE_PCI_DRV_NEED_MAPPING
| RTE_PCI_DRV_INTR_LSC
,
1648 .probe
= rte_eth_dev_pci_probe
,
1649 .remove
= rte_eth_dev_pci_remove
,
1651 .eth_dev_init
= qedevf_eth_dev_init
,
1652 .eth_dev_uninit
= qedevf_eth_dev_uninit
,
1653 .dev_private_size
= sizeof(struct qede_dev
),
1656 static struct eth_driver rte_qede_pmd
= {
1658 .id_table
= pci_id_qede_map
,
1660 RTE_PCI_DRV_NEED_MAPPING
| RTE_PCI_DRV_INTR_LSC
,
1661 .probe
= rte_eth_dev_pci_probe
,
1662 .remove
= rte_eth_dev_pci_remove
,
1664 .eth_dev_init
= qede_eth_dev_init
,
1665 .eth_dev_uninit
= qede_eth_dev_uninit
,
1666 .dev_private_size
= sizeof(struct qede_dev
),
1669 RTE_PMD_REGISTER_PCI(net_qede
, rte_qede_pmd
.pci_drv
);
1670 RTE_PMD_REGISTER_PCI_TABLE(net_qede
, pci_id_qede_map
);
1671 RTE_PMD_REGISTER_PCI(net_qede_vf
, rte_qedevf_pmd
.pci_drv
);
1672 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf
, pci_id_qedevf_map
);