1 /*
2 * Copyright (c) 2016 QLogic Corporation.
3 * All rights reserved.
4 * www.qlogic.com
5 *
6 * See LICENSE.qede_pmd for copyright and licensing details.
7 */
8
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
12
13 /* Globals */
14 static const struct qed_eth_ops *qed_ops;
15 static const char *drivername = "qede pmd";
16 static int64_t timer_period = 1;
17
18 struct rte_qede_xstats_name_off {
19 char name[RTE_ETH_XSTATS_NAME_SIZE];
20 uint64_t offset;
21 };
22
23 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
24 {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
25 {"rx_multicast_bytes",
26 offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
27 {"rx_broadcast_bytes",
28 offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
29 {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
30 {"rx_multicast_packets",
31 offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
32 {"rx_broadcast_packets",
33 offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
34
35 {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
36 {"tx_multicast_bytes",
37 offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
38 {"tx_broadcast_bytes",
39 offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
40 {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
41 {"tx_multicast_packets",
42 offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
43 {"tx_broadcast_packets",
44 offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
45
46 {"rx_64_byte_packets",
47 offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
48 {"rx_65_to_127_byte_packets",
49 offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
50 {"rx_128_to_255_byte_packets",
51 offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
52 {"rx_256_to_511_byte_packets",
53 offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
54 {"rx_512_to_1023_byte_packets",
55 offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
56 {"rx_1024_to_1518_byte_packets",
57 offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
58 {"rx_1519_to_1522_byte_packets",
59 offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
60 {"rx_1519_to_2047_byte_packets",
61 offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
62 {"rx_2048_to_4095_byte_packets",
63 offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
64 {"rx_4096_to_9216_byte_packets",
65 offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
66 {"rx_9217_to_16383_byte_packets",
67 offsetof(struct ecore_eth_stats,
68 rx_9217_to_16383_byte_packets)},
69 {"tx_64_byte_packets",
70 offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
71 {"tx_65_to_127_byte_packets",
72 offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
73 {"tx_128_to_255_byte_packets",
74 offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
75 {"tx_256_to_511_byte_packets",
76 offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
77 {"tx_512_to_1023_byte_packets",
78 offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
79 {"tx_1024_to_1518_byte_packets",
80 offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
81 {"trx_1519_to_1522_byte_packets",
82 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
83 {"tx_2048_to_4095_byte_packets",
84 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
85 {"tx_4096_to_9216_byte_packets",
86 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
87 {"tx_9217_to_16383_byte_packets",
88 offsetof(struct ecore_eth_stats,
89 tx_9217_to_16383_byte_packets)},
90
91 {"rx_mac_crtl_frames",
92 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
93 {"tx_mac_control_frames",
94 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
95 {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
96 {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
97 {"rx_priority_flow_control_frames",
98 offsetof(struct ecore_eth_stats, rx_pfc_frames)},
99 {"tx_priority_flow_control_frames",
100 offsetof(struct ecore_eth_stats, tx_pfc_frames)},
101
102 {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
103 {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
104 {"rx_carrier_errors",
105 offsetof(struct ecore_eth_stats, rx_carrier_errors)},
106 {"rx_oversize_packet_errors",
107 offsetof(struct ecore_eth_stats, rx_oversize_packets)},
108 {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
109 {"rx_undersize_packet_errors",
110 offsetof(struct ecore_eth_stats, rx_undersize_packets)},
111 {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
112 {"rx_host_buffer_not_available",
113 offsetof(struct ecore_eth_stats, no_buff_discards)},
114 /* Number of packets discarded because they are bigger than MTU */
115 {"rx_packet_too_big_discards",
116 offsetof(struct ecore_eth_stats, packet_too_big_discard)},
117 {"rx_ttl_zero_discards",
118 offsetof(struct ecore_eth_stats, ttl0_discard)},
119 {"rx_multi_function_tag_filter_discards",
120 offsetof(struct ecore_eth_stats, mftag_filter_discards)},
121 {"rx_mac_filter_discards",
122 offsetof(struct ecore_eth_stats, mac_filter_discards)},
123 {"rx_hw_buffer_truncates",
124 offsetof(struct ecore_eth_stats, brb_truncates)},
125 {"rx_hw_buffer_discards",
126 offsetof(struct ecore_eth_stats, brb_discards)},
127 {"tx_lpi_entry_count",
128 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
129 {"tx_total_collisions",
130 offsetof(struct ecore_eth_stats, tx_total_collisions)},
131 {"tx_error_drop_packets",
132 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
133
134 {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
135 {"rx_mac_unicast_packets",
136 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
137 {"rx_mac_multicast_packets",
138 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
139 {"rx_mac_broadcast_packets",
140 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
141 {"rx_mac_frames_ok",
142 offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
143 {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
144 {"tx_mac_unicast_packets",
145 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
146 {"tx_mac_multicast_packets",
147 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
148 {"tx_mac_broadcast_packets",
149 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
150
151 {"lro_coalesced_packets",
152 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
153 {"lro_coalesced_events",
154 offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
155 {"lro_aborts_num",
156 offsetof(struct ecore_eth_stats, tpa_aborts_num)},
157 {"lro_not_coalesced_packets",
158 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
159 {"lro_coalesced_bytes",
160 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
161 };
162
163 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
164 {"rx_q_segments",
165 offsetof(struct qede_rx_queue, rx_segs)},
166 {"rx_q_hw_errors",
167 offsetof(struct qede_rx_queue, rx_hw_errors)},
168 {"rx_q_allocation_errors",
169 offsetof(struct qede_rx_queue, rx_alloc_errors)}
170 };
171
172 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
173 {
174 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
175 }
176
177 static void
178 qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
179 {
180 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
181 struct qede_dev *qdev = eth_dev->data->dev_private;
182 struct ecore_dev *edev = &qdev->edev;
183
184 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
185 if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
186 DP_ERR(edev, "rte_intr_enable failed\n");
187 }
188
189 static void
190 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
191 {
192 rte_memcpy(&qdev->dev_info, info, sizeof(*info));
193 qdev->num_tc = qdev->dev_info.num_tc;
194 qdev->ops = qed_ops;
195 }
196
197 static void qede_print_adapter_info(struct qede_dev *qdev)
198 {
199 struct ecore_dev *edev = &qdev->edev;
200 struct qed_dev_info *info = &qdev->dev_info.common;
201 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
202 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
203
204 DP_INFO(edev, "*********************************\n");
205 DP_INFO(edev, " DPDK version:%s\n", rte_version());
206 DP_INFO(edev, " Chip details : %s%d\n",
207 ECORE_IS_BB(edev) ? "BB" : "AH",
208 CHIP_REV_IS_A0(edev) ? 0 : 1);
209 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
210 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
211 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
212 ver_str, QEDE_PMD_VERSION);
213 DP_INFO(edev, " Driver version : %s\n", drv_ver);
214 DP_INFO(edev, " Firmware version : %s\n", ver_str);
215
216 snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
217 "%d.%d.%d.%d",
218 (info->mfw_rev >> 24) & 0xff,
219 (info->mfw_rev >> 16) & 0xff,
220 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
221 DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
222 DP_INFO(edev, " Firmware file : %s\n", fw_file);
223 DP_INFO(edev, "*********************************\n");
224 }
225
226 static int
227 qede_set_ucast_rx_mac(struct qede_dev *qdev,
228 enum qed_filter_xcast_params_type opcode,
229 uint8_t mac[ETHER_ADDR_LEN])
230 {
231 struct ecore_dev *edev = &qdev->edev;
232 struct qed_filter_params filter_cmd;
233
234 memset(&filter_cmd, 0, sizeof(filter_cmd));
235 filter_cmd.type = QED_FILTER_TYPE_UCAST;
236 filter_cmd.filter.ucast.type = opcode;
237 filter_cmd.filter.ucast.mac_valid = 1;
238 rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
239 return qdev->ops->filter_config(edev, &filter_cmd);
240 }
241
242 static void
243 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
244 uint32_t index, __rte_unused uint32_t pool)
245 {
246 struct qede_dev *qdev = eth_dev->data->dev_private;
247 struct ecore_dev *edev = &qdev->edev;
248 int rc;
249
250 PMD_INIT_FUNC_TRACE(edev);
251
252 if (index >= qdev->dev_info.num_mac_addrs) {
253 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
254 index, qdev->dev_info.num_mac_addrs);
255 return;
256 }
257
258 /* Adding macaddr even though promiscuous mode is set */
259 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
260 DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
261
262 /* Add MAC filters according to the unicast secondary macs */
263 rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
264 mac_addr->addr_bytes);
265 if (rc)
266 DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
267 }
268
269 static void
270 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
271 {
272 struct qede_dev *qdev = eth_dev->data->dev_private;
273 struct ecore_dev *edev = &qdev->edev;
274 struct ether_addr mac_addr;
275 int rc;
276
277 PMD_INIT_FUNC_TRACE(edev);
278
279 if (index >= qdev->dev_info.num_mac_addrs) {
280 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
281 index, qdev->dev_info.num_mac_addrs);
282 return;
283 }
284
285 /* Use the index maintained by rte */
286 ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
287 rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
288 mac_addr.addr_bytes);
289 if (rc)
290 DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
291 }
292
293 static void
294 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
295 {
296 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
297 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
298 int rc;
299
300 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
301 mac_addr->addr_bytes)) {
302 DP_ERR(edev, "Setting MAC address is not allowed\n");
303 ether_addr_copy(&qdev->primary_mac,
304 &eth_dev->data->mac_addrs[0]);
305 return;
306 }
307
308 /* First remove the primary mac */
309 rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
310 qdev->primary_mac.addr_bytes);
311
312 if (rc) {
313 DP_ERR(edev, "Unable to remove current macaddr"
314 " Reverting to previous default mac\n");
315 ether_addr_copy(&qdev->primary_mac,
316 &eth_dev->data->mac_addrs[0]);
317 return;
318 }
319
320 /* Add new MAC */
321 rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
322 mac_addr->addr_bytes);
323
324 if (rc)
325 DP_ERR(edev, "Unable to add new default mac\n");
326 else
327 ether_addr_copy(mac_addr, &qdev->primary_mac);
328 }
329
330
331
332
333 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
334 {
335 struct ecore_dev *edev = &qdev->edev;
336 struct qed_update_vport_params params = {
337 .vport_id = 0,
338 .accept_any_vlan = action,
339 .update_accept_any_vlan_flg = 1,
340 };
341 int rc;
342
343 /* Proceed only if action actually needs to be performed */
344 if (qdev->accept_any_vlan == action)
345 return;
346
347 rc = qdev->ops->vport_update(edev, &params);
348 if (rc) {
349 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
350 action ? "enable" : "disable");
351 } else {
352 DP_INFO(edev, "%s accept-any-vlan\n",
353 action ? "enabled" : "disabled");
354 qdev->accept_any_vlan = action;
355 }
356 }
357
358 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
359 {
360 struct qed_update_vport_params vport_update_params;
361 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
362 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
363 int rc;
364
365 memset(&vport_update_params, 0, sizeof(vport_update_params));
366 vport_update_params.vport_id = 0;
367 vport_update_params.update_inner_vlan_removal_flg = 1;
368 vport_update_params.inner_vlan_removal_flg = set_stripping;
369 rc = qdev->ops->vport_update(edev, &vport_update_params);
370 if (rc) {
371 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
372 return rc;
373 }
374
375 return 0;
376 }
377
378 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
379 {
380 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
381 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
382 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
383
384 if (mask & ETH_VLAN_STRIP_MASK) {
385 if (rxmode->hw_vlan_strip)
386 (void)qede_vlan_stripping(eth_dev, 1);
387 else
388 (void)qede_vlan_stripping(eth_dev, 0);
389 }
390
391 if (mask & ETH_VLAN_FILTER_MASK) {
392 /* VLAN filtering kicks in when a VLAN is added */
393 if (rxmode->hw_vlan_filter) {
394 qede_vlan_filter_set(eth_dev, 0, 1);
395 } else {
396 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
397 DP_NOTICE(edev, false,
398 " Please remove existing VLAN filters"
399 " before disabling VLAN filtering\n");
400 /* Signal app that VLAN filtering is still
401 * enabled
402 */
403 rxmode->hw_vlan_filter = true;
404 } else {
405 qede_vlan_filter_set(eth_dev, 0, 0);
406 }
407 }
408 }
409
410 if (mask & ETH_VLAN_EXTEND_MASK)
411 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
412 " and classification is based on outer tag only\n");
413
414 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
415 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
416 }
417
418 static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
419 enum qed_filter_xcast_params_type opcode,
420 uint16_t vid)
421 {
422 struct qed_filter_params filter_cmd;
423 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
424
425 memset(&filter_cmd, 0, sizeof(filter_cmd));
426 filter_cmd.type = QED_FILTER_TYPE_UCAST;
427 filter_cmd.filter.ucast.type = opcode;
428 filter_cmd.filter.ucast.vlan_valid = 1;
429 filter_cmd.filter.ucast.vlan = vid;
430
431 return qdev->ops->filter_config(edev, &filter_cmd);
432 }
433
434 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
435 uint16_t vlan_id, int on)
436 {
437 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
438 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
439 struct qed_dev_eth_info *dev_info = &qdev->dev_info;
440 struct qede_vlan_entry *tmp = NULL;
441 struct qede_vlan_entry *vlan;
442 int rc;
443
444 if (on) {
445 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
446 DP_INFO(edev, "Reached max VLAN filter limit"
447 " enabling accept_any_vlan\n");
448 qede_config_accept_any_vlan(qdev, true);
449 return 0;
450 }
451
452 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
453 if (tmp->vid == vlan_id) {
454 DP_ERR(edev, "VLAN %u already configured\n",
455 vlan_id);
456 return -EEXIST;
457 }
458 }
459
460 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
461 RTE_CACHE_LINE_SIZE);
462
463 if (!vlan) {
464 DP_ERR(edev, "Did not allocate memory for VLAN\n");
465 return -ENOMEM;
466 }
467
468 rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
469 vlan_id);
470 if (rc) {
471 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
472 rc);
473 rte_free(vlan);
474 } else {
475 vlan->vid = vlan_id;
476 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
477 qdev->configured_vlans++;
478 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
479 vlan_id, qdev->configured_vlans);
480 }
481 } else {
482 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
483 if (tmp->vid == vlan_id)
484 break;
485 }
486
487 if (!tmp) {
488 if (qdev->configured_vlans == 0) {
489 DP_INFO(edev,
490 "No VLAN filters configured yet\n");
491 return 0;
492 }
493
494 DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
495 return -EINVAL;
496 }
497
498 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
499
500 rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
501 vlan_id);
502 if (rc) {
503 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
504 vlan_id, rc);
505 } else {
506 qdev->configured_vlans--;
507 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
508 vlan_id, qdev->configured_vlans);
509 }
510 }
511
512 return rc;
513 }
514
515 static int qede_init_vport(struct qede_dev *qdev)
516 {
517 struct ecore_dev *edev = &qdev->edev;
518 struct qed_start_vport_params start = {0};
519 int rc;
520
521 start.remove_inner_vlan = 1;
522 start.gro_enable = 0;
523 start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
524 start.vport_id = 0;
525 start.drop_ttl0 = false;
526 start.clear_stats = 1;
527 start.handle_ptp_pkts = 0;
528
529 rc = qdev->ops->vport_start(edev, &start);
530 if (rc) {
531 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
532 return rc;
533 }
534
535 DP_INFO(edev,
536 "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
537 start.vport_id, ETHER_MTU);
538
539 return 0;
540 }
541
542 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
543 {
544 struct qede_dev *qdev = eth_dev->data->dev_private;
545 struct ecore_dev *edev = &qdev->edev;
546 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
547 int rc, i, j;
548
549 PMD_INIT_FUNC_TRACE(edev);
550
551 /* Check requirements for 100G mode */
552 if (edev->num_hwfns > 1) {
553 if (eth_dev->data->nb_rx_queues < 2 ||
554 eth_dev->data->nb_tx_queues < 2) {
555 DP_NOTICE(edev, false,
556 "100G mode needs min. 2 RX/TX queues\n");
557 return -EINVAL;
558 }
559
560 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
561 (eth_dev->data->nb_tx_queues % 2 != 0)) {
562 DP_NOTICE(edev, false,
563 "100G mode needs even no. of RX/TX queues\n");
564 return -EINVAL;
565 }
566 }
567
568 /* Sanity checks and throw warnings */
569 if (rxmode->enable_scatter == 1)
570 eth_dev->data->scattered_rx = 1;
571
572 if (rxmode->enable_lro == 1) {
573 DP_INFO(edev, "LRO is not supported\n");
574 return -EINVAL;
575 }
576
577 if (!rxmode->hw_strip_crc)
578 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
579
580 if (!rxmode->hw_ip_checksum)
581 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
582 "in hw\n");
583
584 /* Check for the port restart case */
585 if (qdev->state != QEDE_DEV_INIT) {
586 rc = qdev->ops->vport_stop(edev, 0);
587 if (rc != 0)
588 return rc;
589 qede_dealloc_fp_resc(eth_dev);
590 }
591
592 qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
593 qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
594 qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
595
596 /* Fastpath status block must be initialized before sending
597 * VPORT-START in the VF case; do it for both VF and PF anyway.
598 */
599 rc = qede_alloc_fp_resc(qdev);
600 if (rc != 0)
601 return rc;
602
603 /* Issue VPORT-START with default config values to allow
604 * other port configurations early on.
605 */
606 rc = qede_init_vport(qdev);
607 if (rc != 0)
608 return rc;
609
610 SLIST_INIT(&qdev->vlan_list_head);
611
612 /* Add primary mac for PF */
613 if (IS_PF(edev))
614 qede_mac_addr_set(eth_dev, &qdev->primary_mac);
615
616 /* Enable VLAN offloads by default */
617 qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
618 ETH_VLAN_FILTER_MASK |
619 ETH_VLAN_EXTEND_MASK);
620
621 qdev->state = QEDE_DEV_CONFIG;
622
623 DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
624 (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
625 qdev->num_tc);
626
627 return 0;
628 }
629
630 /* Info about HW descriptor ring limitations */
631 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
632 .nb_max = NUM_RX_BDS_MAX,
633 .nb_min = 128,
634 .nb_align = 128 /* ring size must be a multiple of 128 */
635 };
636
637 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
638 .nb_max = NUM_TX_BDS_MAX,
639 .nb_min = 256,
640 .nb_align = 256
641 };
642
643 static void
644 qede_dev_info_get(struct rte_eth_dev *eth_dev,
645 struct rte_eth_dev_info *dev_info)
646 {
647 struct qede_dev *qdev = eth_dev->data->dev_private;
648 struct ecore_dev *edev = &qdev->edev;
649 struct qed_link_output link;
650 uint32_t speed_cap = 0;
651
652 PMD_INIT_FUNC_TRACE(edev);
653
654 dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
655 QEDE_ETH_OVERHEAD);
656 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
657 dev_info->rx_desc_lim = qede_rx_desc_lim;
658 dev_info->tx_desc_lim = qede_tx_desc_lim;
659 dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
660 dev_info->max_tx_queues = dev_info->max_rx_queues;
661 dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
662 if (IS_VF(edev))
663 dev_info->max_vfs = 0;
664 else
665 dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
666 dev_info->driver_name = qdev->drv_ver;
667 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
668 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
669
670 dev_info->default_txconf = (struct rte_eth_txconf) {
671 .txq_flags = QEDE_TXQ_FLAGS,
672 };
673
674 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
675 DEV_RX_OFFLOAD_IPV4_CKSUM |
676 DEV_RX_OFFLOAD_UDP_CKSUM |
677 DEV_RX_OFFLOAD_TCP_CKSUM);
678 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
679 DEV_TX_OFFLOAD_IPV4_CKSUM |
680 DEV_TX_OFFLOAD_UDP_CKSUM |
681 DEV_TX_OFFLOAD_TCP_CKSUM);
682
683 memset(&link, 0, sizeof(struct qed_link_output));
684 qdev->ops->common->get_link(edev, &link);
685 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
686 speed_cap |= ETH_LINK_SPEED_1G;
687 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
688 speed_cap |= ETH_LINK_SPEED_10G;
689 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
690 speed_cap |= ETH_LINK_SPEED_25G;
691 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
692 speed_cap |= ETH_LINK_SPEED_40G;
693 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
694 speed_cap |= ETH_LINK_SPEED_50G;
695 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
696 speed_cap |= ETH_LINK_SPEED_100G;
697 dev_info->speed_capa = speed_cap;
698 }
699
700 /* return 0 means link status changed, -1 means not changed */
701 static int
702 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
703 {
704 struct qede_dev *qdev = eth_dev->data->dev_private;
705 struct ecore_dev *edev = &qdev->edev;
706 uint16_t link_duplex;
707 struct qed_link_output link;
708 struct rte_eth_link *curr = &eth_dev->data->dev_link;
709
710 memset(&link, 0, sizeof(struct qed_link_output));
711 qdev->ops->common->get_link(edev, &link);
712
713 /* Link Speed */
714 curr->link_speed = link.speed;
715
716 /* Link Mode */
717 switch (link.duplex) {
718 case QEDE_DUPLEX_HALF:
719 link_duplex = ETH_LINK_HALF_DUPLEX;
720 break;
721 case QEDE_DUPLEX_FULL:
722 link_duplex = ETH_LINK_FULL_DUPLEX;
723 break;
724 case QEDE_DUPLEX_UNKNOWN:
725 default:
726 link_duplex = -1;
727 }
728 curr->link_duplex = link_duplex;
729
730 /* Link Status */
731 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
732
733 /* AN */
734 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
735 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
736
737 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
738 curr->link_speed, curr->link_duplex,
739 curr->link_autoneg, curr->link_status);
740
741 /* return 0 means link status changed, -1 means not changed */
742 return ((curr->link_status == link.link_up) ? -1 : 0);
743 }
744
745 static void
746 qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
747 enum qed_filter_rx_mode_type accept_flags)
748 {
749 struct qede_dev *qdev = eth_dev->data->dev_private;
750 struct ecore_dev *edev = &qdev->edev;
751 struct qed_filter_params rx_mode;
752
753 DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
754
755 memset(&rx_mode, 0, sizeof(struct qed_filter_params));
756 rx_mode.type = QED_FILTER_TYPE_RX_MODE;
757 rx_mode.filter.accept_flags = accept_flags;
758 qdev->ops->filter_config(edev, &rx_mode);
759 }
760
761 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
762 {
763 struct qede_dev *qdev = eth_dev->data->dev_private;
764 struct ecore_dev *edev = &qdev->edev;
765
766 PMD_INIT_FUNC_TRACE(edev);
767
768 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
769
770 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
771 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
772
773 qede_rx_mode_setting(eth_dev, type);
774 }
775
776 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
777 {
778 struct qede_dev *qdev = eth_dev->data->dev_private;
779 struct ecore_dev *edev = &qdev->edev;
780
781 PMD_INIT_FUNC_TRACE(edev);
782
783 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
784 qede_rx_mode_setting(eth_dev,
785 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
786 else
787 qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
788 }
789
790 static void qede_poll_sp_sb_cb(void *param)
791 {
792 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
793 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
794 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
795 int rc;
796
797 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
798 qede_interrupt_action(&edev->hwfns[1]);
799
800 rc = rte_eal_alarm_set(timer_period * US_PER_S,
801 qede_poll_sp_sb_cb,
802 (void *)eth_dev);
803 if (rc != 0) {
804 DP_ERR(edev, "Unable to start periodic"
805 " timer rc %d\n", rc);
806 assert(false && "Unable to start periodic timer");
807 }
808 }
809
810 static void qede_dev_close(struct rte_eth_dev *eth_dev)
811 {
812 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
813 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
814 int rc;
815
816 PMD_INIT_FUNC_TRACE(edev);
817
818 /* dev_stop() cleans up fastpath resources in hw, but keeps the DMA
819 * memory and sw structures so that dev_start() can be called by the
820 * app without reconfiguration. In dev_close(), however, all resources
821 * are released so the device can be brought up afresh.
822 */
823 if (qdev->state != QEDE_DEV_STOP)
824 qede_dev_stop(eth_dev);
825 else
826 DP_INFO(edev, "Device is already stopped\n");
827
828 rc = qdev->ops->vport_stop(edev, 0);
829 if (rc != 0)
830 DP_ERR(edev, "Failed to stop VPORT\n");
831
832 qede_dealloc_fp_resc(eth_dev);
833
834 qdev->ops->common->slowpath_stop(edev);
835
836 qdev->ops->common->remove(edev);
837
838 rte_intr_disable(&eth_dev->pci_dev->intr_handle);
839
840 rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
841 qede_interrupt_handler, (void *)eth_dev);
842
843 if (edev->num_hwfns > 1)
844 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
845
846 qdev->state = QEDE_DEV_INIT; /* Go back to init state */
847 }
848
849 static void
850 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
851 {
852 struct qede_dev *qdev = eth_dev->data->dev_private;
853 struct ecore_dev *edev = &qdev->edev;
854 struct ecore_eth_stats stats;
855 unsigned int i = 0, j = 0, qid;
856 struct qede_tx_queue *txq;
857
858 qdev->ops->get_vport_stats(edev, &stats);
859
860 /* RX Stats */
861 eth_stats->ipackets = stats.rx_ucast_pkts +
862 stats.rx_mcast_pkts + stats.rx_bcast_pkts;
863
864 eth_stats->ibytes = stats.rx_ucast_bytes +
865 stats.rx_mcast_bytes + stats.rx_bcast_bytes;
866
867 eth_stats->ierrors = stats.rx_crc_errors +
868 stats.rx_align_errors +
869 stats.rx_carrier_errors +
870 stats.rx_oversize_packets +
871 stats.rx_jabbers + stats.rx_undersize_packets;
872
873 eth_stats->rx_nombuf = stats.no_buff_discards;
874
875 eth_stats->imissed = stats.mftag_filter_discards +
876 stats.mac_filter_discards +
877 stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
878
879 /* TX stats */
880 eth_stats->opackets = stats.tx_ucast_pkts +
881 stats.tx_mcast_pkts + stats.tx_bcast_pkts;
882
883 eth_stats->obytes = stats.tx_ucast_bytes +
884 stats.tx_mcast_bytes + stats.tx_bcast_bytes;
885
886 eth_stats->oerrors = stats.tx_err_drop_pkts;
887
888 /* Queue stats */
889 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
890 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
891 eth_stats->q_ipackets[i] =
892 *(uint64_t *)(
893 ((char *)(qdev->fp_array[(qid)].rxq)) +
894 offsetof(struct qede_rx_queue,
895 rcv_pkts));
896 eth_stats->q_errors[i] =
897 *(uint64_t *)(
898 ((char *)(qdev->fp_array[(qid)].rxq)) +
899 offsetof(struct qede_rx_queue,
900 rx_hw_errors)) +
901 *(uint64_t *)(
902 ((char *)(qdev->fp_array[(qid)].rxq)) +
903 offsetof(struct qede_rx_queue,
904 rx_alloc_errors));
905 i++;
906 }
907
908 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
909 txq = qdev->fp_array[(qid)].txqs[0];
910 eth_stats->q_opackets[j] =
911 *((uint64_t *)(uintptr_t)
912 (((uint64_t)(uintptr_t)(txq)) +
913 offsetof(struct qede_tx_queue,
914 xmit_pkts)));
915 j++;
916 }
917 }
918 }
919
920 static unsigned
921 qede_get_xstats_count(struct qede_dev *qdev) {
922 return RTE_DIM(qede_xstats_strings) +
923 (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
924 }
925
926 static int
927 qede_get_xstats_names(struct rte_eth_dev *dev,
928 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
929 {
930 struct qede_dev *qdev = dev->data->dev_private;
931 const unsigned int stat_cnt = qede_get_xstats_count(qdev);
932 unsigned int i, qid, stat_idx = 0;
933
934 if (xstats_names != NULL) {
935 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
936 snprintf(xstats_names[stat_idx].name,
937 sizeof(xstats_names[stat_idx].name),
938 "%s",
939 qede_xstats_strings[i].name);
940 stat_idx++;
941 }
942
943 for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
944 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
945 snprintf(xstats_names[stat_idx].name,
946 sizeof(xstats_names[stat_idx].name),
947 "%.4s%d%s",
948 qede_rxq_xstats_strings[i].name, qid,
949 qede_rxq_xstats_strings[i].name + 4);
950 stat_idx++;
951 }
952 }
953 }
954
955 return stat_cnt;
956 }
957
958 static int
959 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
960 unsigned int n)
961 {
962 struct qede_dev *qdev = dev->data->dev_private;
963 struct ecore_dev *edev = &qdev->edev;
964 struct ecore_eth_stats stats;
965 const unsigned int num = qede_get_xstats_count(qdev);
966 unsigned int i, qid, stat_idx = 0;
967
968 if (n < num)
969 return num;
970
971 qdev->ops->get_vport_stats(edev, &stats);
972
973 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
974 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
975 qede_xstats_strings[i].offset);
976 stat_idx++;
977 }
978
979 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
980 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
981 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
982 xstats[stat_idx].value = *(uint64_t *)(
983 ((char *)(qdev->fp_array[(qid)].rxq)) +
984 qede_rxq_xstats_strings[i].offset);
985 stat_idx++;
986 }
987 }
988 }
989
990 return stat_idx;
991 }
992
993 static void
994 qede_reset_xstats(struct rte_eth_dev *dev)
995 {
996 struct qede_dev *qdev = dev->data->dev_private;
997 struct ecore_dev *edev = &qdev->edev;
998
999 ecore_reset_vport_stats(edev);
1000 }
1001
1002 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1003 {
1004 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1005 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1006 struct qed_link_params link_params;
1007 int rc;
1008
1009 DP_INFO(edev, "setting link state %d\n", link_up);
1010 memset(&link_params, 0, sizeof(link_params));
1011 link_params.link_up = link_up;
1012 rc = qdev->ops->common->set_link(edev, &link_params);
1013 if (rc != ECORE_SUCCESS)
1014 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1015
1016 return rc;
1017 }
1018
1019 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1020 {
1021 return qede_dev_set_link_state(eth_dev, true);
1022 }
1023
1024 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1025 {
1026 return qede_dev_set_link_state(eth_dev, false);
1027 }
1028
1029 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1030 {
1031 struct qede_dev *qdev = eth_dev->data->dev_private;
1032 struct ecore_dev *edev = &qdev->edev;
1033
1034 ecore_reset_vport_stats(edev);
1035 }
1036
1037 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1038 {
1039 enum qed_filter_rx_mode_type type =
1040 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1041
1042 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1043 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1044
1045 qede_rx_mode_setting(eth_dev, type);
1046 }
1047
1048 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1049 {
1050 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1051 qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
1052 else
1053 qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
1054 }
1055
1056 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1057 struct rte_eth_fc_conf *fc_conf)
1058 {
1059 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1060 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1061 struct qed_link_output current_link;
1062 struct qed_link_params params;
1063
1064 memset(&current_link, 0, sizeof(current_link));
1065 qdev->ops->common->get_link(edev, &current_link);
1066
1067 memset(&params, 0, sizeof(params));
1068 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1069 if (fc_conf->autoneg) {
1070 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1071 DP_ERR(edev, "Autoneg not supported\n");
1072 return -EINVAL;
1073 }
1074 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1075 }
1076
1077 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1078 if (fc_conf->mode == RTE_FC_FULL)
1079 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1080 QED_LINK_PAUSE_RX_ENABLE);
1081 if (fc_conf->mode == RTE_FC_TX_PAUSE)
1082 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1083 if (fc_conf->mode == RTE_FC_RX_PAUSE)
1084 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1085
1086 params.link_up = true;
1087 (void)qdev->ops->common->set_link(edev, &params);
1088
1089 return 0;
1090 }
1091
1092 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1093 struct rte_eth_fc_conf *fc_conf)
1094 {
1095 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1096 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1097 struct qed_link_output current_link;
1098
1099 memset(&current_link, 0, sizeof(current_link));
1100 qdev->ops->common->get_link(edev, &current_link);
1101
1102 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1103 fc_conf->autoneg = true;
1104
1105 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1106 QED_LINK_PAUSE_TX_ENABLE))
1107 fc_conf->mode = RTE_FC_FULL;
1108 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1109 fc_conf->mode = RTE_FC_RX_PAUSE;
1110 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1111 fc_conf->mode = RTE_FC_TX_PAUSE;
1112 else
1113 fc_conf->mode = RTE_FC_NONE;
1114
1115 return 0;
1116 }
1117
1118 static const uint32_t *
1119 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1120 {
1121 static const uint32_t ptypes[] = {
1122 RTE_PTYPE_L3_IPV4,
1123 RTE_PTYPE_L3_IPV6,
1124 RTE_PTYPE_UNKNOWN
1125 };
1126
1127 if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1128 return ptypes;
1129
1130 return NULL;
1131 }
1132
1133 void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1134 {
1135 *rss_caps = 0;
1136 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
1137 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
1138 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
1139 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
1140 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
1141 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
1142 }
1143
1144 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1145 struct rte_eth_rss_conf *rss_conf)
1146 {
1147 struct qed_update_vport_params vport_update_params;
1148 struct qede_dev *qdev = eth_dev->data->dev_private;
1149 struct ecore_dev *edev = &qdev->edev;
1150 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1151 uint64_t hf = rss_conf->rss_hf;
1152 int i;
1153
1154 memset(&vport_update_params, 0, sizeof(vport_update_params));
1155
1156 if (hf != 0) {
1157 /* Enable RSS */
1158 qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
1159 memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1160 sizeof(vport_update_params.rss_params));
1161 if (key)
1162 memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
1163 rss_conf->rss_key_len);
1164 vport_update_params.update_rss_flg = 1;
1165 qdev->rss_enabled = 1;
1166 } else {
1167 /* Disable RSS */
1168 qdev->rss_enabled = 0;
1169 }
1170
1171 /* Bail out if none of the requested hash types is supported */
1172 if (qdev->rss_params.rss_caps == 0 && hf != 0)
1173 return -EINVAL;
1174
1175 DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
1176 "Enabling RSS" : "Disabling RSS");
1177
1178 vport_update_params.vport_id = 0;
1179
1180 return qdev->ops->vport_update(edev, &vport_update_params);
1181 }
1182
1183 int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1184 struct rte_eth_rss_conf *rss_conf)
1185 {
1186 struct qede_dev *qdev = eth_dev->data->dev_private;
1187 uint64_t hf;
1188
1189 if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
1190 return -EINVAL;
1191
1192 if (rss_conf->rss_key)
1193 memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
1194 sizeof(qdev->rss_params.rss_key));
1195
1196 hf = 0;
1197 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
1198 ETH_RSS_IPV4 : 0;
1199 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
1200 ETH_RSS_IPV6 : 0;
1201 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
1202 ETH_RSS_IPV6_EX : 0;
1203 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
1204 ETH_RSS_NONFRAG_IPV4_TCP : 0;
1205 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1206 ETH_RSS_NONFRAG_IPV6_TCP : 0;
1207 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1208 ETH_RSS_IPV6_TCP_EX : 0;
1209
1210 rss_conf->rss_hf = hf;
1211
1212 return 0;
1213 }
1214
1215 static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
1216 struct rte_eth_rss_reta_entry64 *reta_conf,
1217 uint16_t reta_size)
1218 {
1219 struct qed_update_vport_params vport_update_params;
1220 struct qede_dev *qdev = eth_dev->data->dev_private;
1221 struct ecore_dev *edev = &qdev->edev;
1222 uint16_t i, idx, shift;
1223
1224 if (reta_size > ETH_RSS_RETA_SIZE_128) {
1225 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
1226 reta_size);
1227 return -EINVAL;
1228 }
1229
1230 memset(&vport_update_params, 0, sizeof(vport_update_params));
1231 memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1232 sizeof(vport_update_params.rss_params));
1233
1234 for (i = 0; i < reta_size; i++) {
1235 idx = i / RTE_RETA_GROUP_SIZE;
1236 shift = i % RTE_RETA_GROUP_SIZE;
1237 if (reta_conf[idx].mask & (1ULL << shift)) {
1238 uint8_t entry = reta_conf[idx].reta[shift];
1239 qdev->rss_params.rss_ind_table[i] = entry;
1240 }
1241 }
1242
1243 vport_update_params.update_rss_flg = 1;
1244 vport_update_params.vport_id = 0;
1245
1246 return qdev->ops->vport_update(edev, &vport_update_params);
1247 }
1248
1249 int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
1250 struct rte_eth_rss_reta_entry64 *reta_conf,
1251 uint16_t reta_size)
1252 {
1253 struct qede_dev *qdev = eth_dev->data->dev_private;
1254 uint16_t i, idx, shift;
1255
1256 if (reta_size > ETH_RSS_RETA_SIZE_128) {
1257 struct ecore_dev *edev = &qdev->edev;
1258 DP_ERR(edev, "reta_size %d is not supported\n",
1259 reta_size);
1260 }
1261
1262 for (i = 0; i < reta_size; i++) {
1263 idx = i / RTE_RETA_GROUP_SIZE;
1264 shift = i % RTE_RETA_GROUP_SIZE;
1265 if (reta_conf[idx].mask & (1ULL << shift)) {
1266 uint8_t entry = qdev->rss_params.rss_ind_table[i];
1267 reta_conf[idx].reta[shift] = entry;
1268 }
1269 }
1270
1271 return 0;
1272 }
1273
1274 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1275 {
1276 uint32_t frame_size;
1277 struct qede_dev *qdev = dev->data->dev_private;
1278 struct rte_eth_dev_info dev_info = {0};
1279
1280 qede_dev_info_get(dev, &dev_info);
1281
1282 /* VLAN_TAG = 4 */
1283 frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
1284
1285 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1286 return -EINVAL;
1287
1288 if (!dev->data->scattered_rx &&
1289 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1290 return -EINVAL;
1291
1292 if (frame_size > ETHER_MAX_LEN)
1293 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1294 else
1295 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1296
1297 /* update max frame size */
1298 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1299 qdev->mtu = mtu;
1300 qede_dev_stop(dev);
1301 qede_dev_start(dev);
1302
1303 return 0;
1304 }
1305
1306 static const struct eth_dev_ops qede_eth_dev_ops = {
1307 .dev_configure = qede_dev_configure,
1308 .dev_infos_get = qede_dev_info_get,
1309 .rx_queue_setup = qede_rx_queue_setup,
1310 .rx_queue_release = qede_rx_queue_release,
1311 .tx_queue_setup = qede_tx_queue_setup,
1312 .tx_queue_release = qede_tx_queue_release,
1313 .dev_start = qede_dev_start,
1314 .dev_set_link_up = qede_dev_set_link_up,
1315 .dev_set_link_down = qede_dev_set_link_down,
1316 .link_update = qede_link_update,
1317 .promiscuous_enable = qede_promiscuous_enable,
1318 .promiscuous_disable = qede_promiscuous_disable,
1319 .allmulticast_enable = qede_allmulticast_enable,
1320 .allmulticast_disable = qede_allmulticast_disable,
1321 .dev_stop = qede_dev_stop,
1322 .dev_close = qede_dev_close,
1323 .stats_get = qede_get_stats,
1324 .stats_reset = qede_reset_stats,
1325 .xstats_get = qede_get_xstats,
1326 .xstats_reset = qede_reset_xstats,
1327 .xstats_get_names = qede_get_xstats_names,
1328 .mac_addr_add = qede_mac_addr_add,
1329 .mac_addr_remove = qede_mac_addr_remove,
1330 .mac_addr_set = qede_mac_addr_set,
1331 .vlan_offload_set = qede_vlan_offload_set,
1332 .vlan_filter_set = qede_vlan_filter_set,
1333 .flow_ctrl_set = qede_flow_ctrl_set,
1334 .flow_ctrl_get = qede_flow_ctrl_get,
1335 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1336 .rss_hash_update = qede_rss_hash_update,
1337 .rss_hash_conf_get = qede_rss_hash_conf_get,
1338 .reta_update = qede_rss_reta_update,
1339 .reta_query = qede_rss_reta_query,
1340 .mtu_set = qede_set_mtu,
1341 };
1342
1343 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
1344 .dev_configure = qede_dev_configure,
1345 .dev_infos_get = qede_dev_info_get,
1346 .rx_queue_setup = qede_rx_queue_setup,
1347 .rx_queue_release = qede_rx_queue_release,
1348 .tx_queue_setup = qede_tx_queue_setup,
1349 .tx_queue_release = qede_tx_queue_release,
1350 .dev_start = qede_dev_start,
1351 .dev_set_link_up = qede_dev_set_link_up,
1352 .dev_set_link_down = qede_dev_set_link_down,
1353 .link_update = qede_link_update,
1354 .promiscuous_enable = qede_promiscuous_enable,
1355 .promiscuous_disable = qede_promiscuous_disable,
1356 .allmulticast_enable = qede_allmulticast_enable,
1357 .allmulticast_disable = qede_allmulticast_disable,
1358 .dev_stop = qede_dev_stop,
1359 .dev_close = qede_dev_close,
1360 .stats_get = qede_get_stats,
1361 .stats_reset = qede_reset_stats,
1362 .xstats_get = qede_get_xstats,
1363 .xstats_reset = qede_reset_xstats,
1364 .xstats_get_names = qede_get_xstats_names,
1365 .vlan_offload_set = qede_vlan_offload_set,
1366 .vlan_filter_set = qede_vlan_filter_set,
1367 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1368 .rss_hash_update = qede_rss_hash_update,
1369 .rss_hash_conf_get = qede_rss_hash_conf_get,
1370 .reta_update = qede_rss_reta_update,
1371 .reta_query = qede_rss_reta_query,
1372 .mtu_set = qede_set_mtu,
1373 };
1374
1375 static void qede_update_pf_params(struct ecore_dev *edev)
1376 {
1377 struct ecore_pf_params pf_params;
1378 /* 32 rx + 32 tx */
1379 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
1380 pf_params.eth_pf_params.num_cons = 64;
1381 qed_ops->common->update_pf_params(edev, &pf_params);
1382 }
1383
1384 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
1385 {
1386 struct rte_pci_device *pci_dev;
1387 struct rte_pci_addr pci_addr;
1388 struct qede_dev *adapter;
1389 struct ecore_dev *edev;
1390 struct qed_dev_eth_info dev_info;
1391 struct qed_slowpath_params params;
1392 static bool do_once = true;
1393 uint8_t bulletin_change;
1394 uint8_t vf_mac[ETHER_ADDR_LEN];
1395 uint8_t is_mac_forced;
1396 bool is_mac_exist;
1397 /* Fix up ecore debug level */
1398 uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
1399 uint8_t dp_level = ECORE_LEVEL_VERBOSE;
1400 uint32_t max_mac_addrs;
1401 int rc;
1402
1403 /* Extract key data structures */
1404 adapter = eth_dev->data->dev_private;
1405 edev = &adapter->edev;
1406 pci_addr = eth_dev->pci_dev->addr;
1407
1408 PMD_INIT_FUNC_TRACE(edev);
1409
1410 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
1411 pci_addr.bus, pci_addr.devid, pci_addr.function,
1412 eth_dev->data->port_id);
1413
1414 eth_dev->rx_pkt_burst = qede_recv_pkts;
1415 eth_dev->tx_pkt_burst = qede_xmit_pkts;
1416
1417 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1418 DP_NOTICE(edev, false,
1419 "Skipping device init from secondary process\n");
1420 return 0;
1421 }
1422
1423 pci_dev = eth_dev->pci_dev;
1424
1425 rte_eth_copy_pci_info(eth_dev, pci_dev);
1426
1427 qed_ops = qed_get_eth_ops();
1428 if (!qed_ops) {
1429 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
1430 return -EINVAL;
1431 }
1432
1433 DP_INFO(edev, "Starting qede probe\n");
1434
1435 rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
1436 dp_module, dp_level, is_vf);
1437
1438 if (rc != 0) {
1439 DP_ERR(edev, "qede probe failed rc %d\n", rc);
1440 return -ENODEV;
1441 }
1442
1443 qede_update_pf_params(edev);
1444
1445 rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
1446 qede_interrupt_handler, (void *)eth_dev);
1447
1448 if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
1449 DP_ERR(edev, "rte_intr_enable() failed\n");
1450 return -ENODEV;
1451 }
1452
1453 /* Start the Slowpath-process */
1454 memset(&params, 0, sizeof(struct qed_slowpath_params));
1455 params.int_mode = ECORE_INT_MODE_MSIX;
1456 params.drv_major = QEDE_PMD_VERSION_MAJOR;
1457 params.drv_minor = QEDE_PMD_VERSION_MINOR;
1458 params.drv_rev = QEDE_PMD_VERSION_REVISION;
1459 params.drv_eng = QEDE_PMD_VERSION_PATCH;
1460 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
1461 QEDE_PMD_DRV_VER_STR_SIZE);
1462
1463 /* For CMT-mode devices, poll periodically for slowpath events.
1464 * This is required since the uio device uses only one MSI-X
1465 * interrupt vector, but one is needed for each engine.
1466 */
1467 if (edev->num_hwfns > 1 && IS_PF(edev)) {
1468 rc = rte_eal_alarm_set(timer_period * US_PER_S,
1469 qede_poll_sp_sb_cb,
1470 (void *)eth_dev);
1471 if (rc != 0) {
1472 DP_ERR(edev, "Unable to start periodic"
1473 " timer rc %d\n", rc);
1474 return -EINVAL;
1475 }
1476 }
1477
1478 rc = qed_ops->common->slowpath_start(edev, &params);
1479 if (rc) {
1480 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
1481 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1482 (void *)eth_dev);
1483 return -ENODEV;
1484 }
1485
1486 rc = qed_ops->fill_dev_info(edev, &dev_info);
1487 if (rc) {
1488 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
1489 qed_ops->common->slowpath_stop(edev);
1490 qed_ops->common->remove(edev);
1491 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1492 (void *)eth_dev);
1493 return -ENODEV;
1494 }
1495
1496 qede_alloc_etherdev(adapter, &dev_info);
1497
1498 adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
1499
1500 if (!is_vf)
1501 adapter->dev_info.num_mac_addrs =
1502 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
1503 ECORE_MAC);
1504 else
1505 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
1506 &adapter->dev_info.num_mac_addrs);
1507
1508 /* Allocate memory for storing MAC addr */
1509 eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
1510 (ETHER_ADDR_LEN *
1511 adapter->dev_info.num_mac_addrs),
1512 RTE_CACHE_LINE_SIZE);
1513
1514 if (eth_dev->data->mac_addrs == NULL) {
1515 DP_ERR(edev, "Failed to allocate MAC address\n");
1516 qed_ops->common->slowpath_stop(edev);
1517 qed_ops->common->remove(edev);
1518 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1519 (void *)eth_dev);
1520 return -ENOMEM;
1521 }
1522
1523 if (!is_vf) {
1524 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
1525 hw_info.hw_mac_addr,
1526 &eth_dev->data->mac_addrs[0]);
1527 ether_addr_copy(&eth_dev->data->mac_addrs[0],
1528 &adapter->primary_mac);
1529 } else {
1530 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
1531 &bulletin_change);
1532 if (bulletin_change) {
1533 is_mac_exist =
1534 ecore_vf_bulletin_get_forced_mac(
1535 ECORE_LEADING_HWFN(edev),
1536 vf_mac,
1537 &is_mac_forced);
1538 if (is_mac_exist && is_mac_forced) {
1539 DP_INFO(edev, "VF macaddr received from PF\n");
1540 ether_addr_copy((struct ether_addr *)&vf_mac,
1541 &eth_dev->data->mac_addrs[0]);
1542 ether_addr_copy(&eth_dev->data->mac_addrs[0],
1543 &adapter->primary_mac);
1544 } else {
1545 DP_NOTICE(edev, false,
1546 "No VF macaddr assigned\n");
1547 }
1548 }
1549 }
1550
1551 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
1552
1553 if (do_once) {
1554 qede_print_adapter_info(adapter);
1555 do_once = false;
1556 }
1557
1558 adapter->state = QEDE_DEV_INIT;
1559
1560 DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
1561 adapter->primary_mac.addr_bytes[0],
1562 adapter->primary_mac.addr_bytes[1],
1563 adapter->primary_mac.addr_bytes[2],
1564 adapter->primary_mac.addr_bytes[3],
1565 adapter->primary_mac.addr_bytes[4],
1566 adapter->primary_mac.addr_bytes[5]);
1567
1568 return rc;
1569 }
1570
1571 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
1572 {
1573 return qede_common_dev_init(eth_dev, 1);
1574 }
1575
1576 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
1577 {
1578 return qede_common_dev_init(eth_dev, 0);
1579 }
1580
1581 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
1582 {
1583 /* only uninitialize in the primary process */
1584 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1585 return 0;
1586
1587 /* safe to close dev here */
1588 qede_dev_close(eth_dev);
1589
1590 eth_dev->dev_ops = NULL;
1591 eth_dev->rx_pkt_burst = NULL;
1592 eth_dev->tx_pkt_burst = NULL;
1593
1594 if (eth_dev->data->mac_addrs)
1595 rte_free(eth_dev->data->mac_addrs);
1596
1597 eth_dev->data->mac_addrs = NULL;
1598
1599 return 0;
1600 }
1601
1602 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1603 {
1604 return qede_dev_common_uninit(eth_dev);
1605 }
1606
1607 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1608 {
1609 return qede_dev_common_uninit(eth_dev);
1610 }
1611
1612 static struct rte_pci_id pci_id_qedevf_map[] = {
1613 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1614 {
1615 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
1616 },
1617 {
1618 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
1619 },
1620 {.vendor_id = 0,}
1621 };
1622
1623 static struct rte_pci_id pci_id_qede_map[] = {
1624 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1625 {
1626 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
1627 },
1628 {
1629 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
1630 },
1631 {
1632 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
1633 },
1634 {
1635 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
1636 },
1637 {
1638 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
1639 },
1640 {.vendor_id = 0,}
1641 };
1642
1643 static struct eth_driver rte_qedevf_pmd = {
1644 .pci_drv = {
1645 .id_table = pci_id_qedevf_map,
1646 .drv_flags =
1647 RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1648 .probe = rte_eth_dev_pci_probe,
1649 .remove = rte_eth_dev_pci_remove,
1650 },
1651 .eth_dev_init = qedevf_eth_dev_init,
1652 .eth_dev_uninit = qedevf_eth_dev_uninit,
1653 .dev_private_size = sizeof(struct qede_dev),
1654 };
1655
1656 static struct eth_driver rte_qede_pmd = {
1657 .pci_drv = {
1658 .id_table = pci_id_qede_map,
1659 .drv_flags =
1660 RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1661 .probe = rte_eth_dev_pci_probe,
1662 .remove = rte_eth_dev_pci_remove,
1663 },
1664 .eth_dev_init = qede_eth_dev_init,
1665 .eth_dev_uninit = qede_eth_dev_uninit,
1666 .dev_private_size = sizeof(struct qede_dev),
1667 };
1668
1669 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
1670 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
1671 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
1672 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);