/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

#include <rte_dev.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

int bnx2x_logtype_init;
int bnx2x_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
#define BROADCOM_PCI_VENDOR_ID 0x14E4
static const struct rte_pci_id pci_id_bnx2x_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
#endif
	{ .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
	{ .vendor_id = 0, }
};

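/*
 * Extended statistics bookkeeping: each entry names an xstat and records the
 * offsets of the high/low 32-bit halves of the matching counter inside
 * struct bnx2x_eth_stats.  Entries whose hi and lo offsets are identical
 * refer to a single (non-split) counter.
 */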
struct rte_bnx2x_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset_hi;
	uint32_t offset_lo;
};

static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
	{"rx_buffer_drops",
		offsetof(struct bnx2x_eth_stats, brb_drop_hi),
		offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
	{"rx_buffer_truncates",
		offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
		offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
	{"rx_buffer_truncate_discard",
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
	{"mac_filter_discard",
		offsetof(struct bnx2x_eth_stats, mac_filter_discard),
		offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
	{"no_match_vlan_tag_discard",
		offsetof(struct bnx2x_eth_stats, mf_tag_discard),
		offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
	{"tx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
	{"rx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
	{"tx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
	{"rx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};

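/*
 * Refresh the link state from the adapter and publish speed, duplex,
 * autoneg and link-up status through rte_eth_linkstatus_set().
 */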
static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	bnx2x_link_status_update(sc);
	memset(&link, 0, sizeof(link));
	mb();
	link.link_speed = sc->link_vars.line_speed;
	switch (sc->link_vars.duplex) {
	case DUPLEX_FULL:
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case DUPLEX_HALF:
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	}
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			ETH_LINK_SPEED_FIXED);
	link.link_status = sc->link_vars.link_up;

	return rte_eth_linkstatus_set(dev, &link);
}

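/*
 * Slowpath interrupt work: service the legacy interrupt, run the periodic
 * callout when it is armed, and re-read the shared-memory link status so a
 * link change is reflected in the ethdev link state.
 */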
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t link_status;

	bnx2x_intr_legacy(sc, 0);

	if (sc->periodic_flags & PERIODIC_GO)
		bnx2x_periodic_callout(sc);
	link_status = REG_RD(sc, sc->link_params.shmem_base +
			offsetof(struct shmem_region,
				port_mb[sc->link_params.port].link_status));
	if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
		bnx2x_link_update(dev);
}

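/*
 * Interrupt callback registered with the EAL; re-enables the interrupt
 * once the slowpath work has been handled.
 */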
static void
bnx2x_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");

	bnx2x_interrupt_action(dev);
	rte_intr_enable(&sc->pci_dev->intr_handle);
}

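/*
 * Periodic slowpath poll.  For a PF it re-arms itself via an EAL alarm every
 * BNX2X_SP_TIMER_PERIOD so link events are picked up even without interrupts.
 */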
static void bnx2x_periodic_start(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
	bnx2x_interrupt_action(dev);
	if (IS_PF(sc)) {
		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
					bnx2x_periodic_start, (void *)dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "Unable to start periodic"
					 " timer rc %d", ret);
			assert(false && "Unable to start periodic timer");
		}
	}
}

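/* Stop the periodic slowpath poll and cancel any pending alarm. */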
void bnx2x_periodic_stop(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct bnx2x_softc *sc = dev->data->dev_private;

	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);

	rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
}

/*
 * Device operations (dev_ops): helpers invoked from the user application
 * through the rte_ethdev API.
 */

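/*
 * Validate the requested queue layout, pick up the MTU from the jumbo-frame
 * configuration and allocate the ILT and HSI memory needed before the port
 * can be started.
 */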
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);

	PMD_INIT_FUNC_TRACE();

	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
		return -EINVAL;
	}

	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (sc->num_queues > mp_ncpus) {
		PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
		    sc->num_queues, sc->mtu);

	/* allocate ilt */
	if (bnx2x_alloc_ilt_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem failed");
		return -ENXIO;
	}

	/* allocate the host hardware/software hsi structures */
	if (bnx2x_alloc_hsi_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem failed");
		bnx2x_free_ilt_mem(sc);
		return -ENXIO;
	}

	return 0;
}

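/*
 * Bring the port up: restart the periodic callout if it was stopped,
 * initialize the adapter, hook up the interrupt handler (PF only) and set up
 * the RX path.
 */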
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* start the periodic callout */
	if (sc->periodic_flags & PERIODIC_STOP)
		bnx2x_periodic_start(dev);

	ret = bnx2x_init(sc);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
		return -1;
	}

	if (IS_PF(sc)) {
		rte_intr_callback_register(&sc->pci_dev->intr_handle,
				bnx2x_interrupt_handler, (void *)dev);

		if (rte_intr_enable(&sc->pci_dev->intr_handle))
			PMD_DRV_LOG(ERR, "rte_intr_enable failed");
	}

	ret = bnx2x_dev_rx_init(dev);
	if (ret != 0) {
		PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
		return -3;
	}

	/* Print important adapter info for the user. */
	bnx2x_print_adapter_info(sc);

	return ret;
}

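/*
 * Quiesce the port: detach the interrupt handler (PF only), stop the
 * periodic callout and unload the NIC.
 */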
static void
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (IS_PF(sc)) {
		rte_intr_disable(&sc->pci_dev->intr_handle);
		rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
				bnx2x_interrupt_handler, (void *)dev);
	}

	/* stop the periodic callout */
	bnx2x_periodic_stop(dev);

	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
		return;
	}

	return;
}

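/* Release the queues and the HSI/ILT memory allocated in bnx2x_dev_configure(). */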
static void
bnx2x_dev_close(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (IS_VF(sc))
		bnx2x_vf_close(sc);

	bnx2x_dev_clear_queues(dev);
	memset(&(dev->data->dev_link), 0, sizeof(struct rte_eth_link));

	/* free the host hardware/software hsi structures */
	bnx2x_free_hsi_mem(sc);

	/* free ilt */
	bnx2x_free_ilt_mem(sc);
}

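/*
 * RX filter mode helpers.  Promiscuous and all-multicast are tracked
 * together, so enabling/disabling one preserves the other before the
 * combined mode is pushed to the adapter via bnx2x_set_rx_mode().
 */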
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	bnx2x_set_rx_mode(sc);
}

static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	return bnx2x_link_update(dev);
}

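/*
 * VF link update: besides the normal link refresh, check the PF-to-VF
 * bulletin and force the link down if the PF reports the channel as down.
 */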
static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	ret = bnx2x_link_update(dev);

	bnx2x_check_bull(sc);
	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
		PMD_DRV_LOG(ERR, "PF indicated channel is down. "
				"VF device is no longer operational");
		dev->data->dev_link.link_status = ETH_LINK_DOWN;
	}

	return ret;
}

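/*
 * Basic ethdev statistics, assembled from the 64-bit hi/lo adapter counters;
 * imissed aggregates the BRB drop/truncate counters plus mbuf allocation
 * failures.
 */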
static int
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t brb_truncate_discard;
	uint64_t brb_drops;
	uint64_t brb_truncates;

	PMD_INIT_FUNC_TRACE();

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
			 sc->eth_stats.total_unicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
			 sc->eth_stats.total_multicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
			 sc->eth_stats.total_broadcast_packets_received_lo);

	stats->opackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
			 sc->eth_stats.total_unicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
			 sc->eth_stats.total_multicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
			 sc->eth_stats.total_broadcast_packets_transmitted_lo);

	stats->ibytes =
		HILO_U64(sc->eth_stats.total_bytes_received_hi,
			 sc->eth_stats.total_bytes_received_lo);

	stats->obytes =
		HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
			 sc->eth_stats.total_bytes_transmitted_lo);

	stats->ierrors =
		HILO_U64(sc->eth_stats.error_bytes_received_hi,
			 sc->eth_stats.error_bytes_received_lo);

	stats->oerrors = 0;

	stats->rx_nombuf =
		HILO_U64(sc->eth_stats.no_buff_discard_hi,
			 sc->eth_stats.no_buff_discard_lo);

	brb_drops =
		HILO_U64(sc->eth_stats.brb_drop_hi,
			 sc->eth_stats.brb_drop_lo);

	brb_truncates =
		HILO_U64(sc->eth_stats.brb_truncate_hi,
			 sc->eth_stats.brb_truncate_lo);

	brb_truncate_discard = sc->eth_stats.brb_truncate_discard;

	stats->imissed = brb_drops + brb_truncates +
			 brb_truncate_discard + stats->rx_nombuf;

	return 0;
}

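/*
 * Report the extended statistic names; the count is always returned so the
 * caller can size its array.
 */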
static int
bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned limit)
{
	unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				sizeof(xstats_names[i].name),
				"%s",
				bnx2x_xstats_strings[i].name);

	return stat_cnt;
}

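/*
 * Fill the extended statistics from bnx2x_eth_stats using the offsets
 * recorded in bnx2x_xstats_strings.
 */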
static int
bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	unsigned int num = RTE_DIM(bnx2x_xstats_strings);

	if (n < num)
		return num;

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	/* Only RTE_DIM(bnx2x_xstats_strings) entries are filled, even when the
	 * caller's array (n) is larger.
	 */
	for (num = 0; num < RTE_DIM(bnx2x_xstats_strings); num++) {
		if (bnx2x_xstats_strings[num].offset_hi !=
		    bnx2x_xstats_strings[num].offset_lo)
			xstats[num].value = HILO_U64(
				*(uint32_t *)((char *)&sc->eth_stats +
					bnx2x_xstats_strings[num].offset_hi),
				*(uint32_t *)((char *)&sc->eth_stats +
					bnx2x_xstats_strings[num].offset_lo));
		else
			xstats[num].value =
				*(uint64_t *)((char *)&sc->eth_stats +
					bnx2x_xstats_strings[num].offset_lo);
		xstats[num].id = num;
	}

	return num;
}

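/* Advertise device limits and capabilities to the ethdev layer. */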
static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	dev_info->max_rx_queues  = sc->max_rx_queues;
	dev_info->max_tx_queues  = sc->max_tx_queues;
	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
}

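/*
 * MAC address add/remove forward to the per-device mac_ops hooks when they
 * are installed; otherwise add reports -ENOTSUP.
 */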
static int
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_add) {
		sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
		return 0;
	}
	return -ENOTSUP;
}

static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_remove)
		sc->mac_ops.mac_addr_remove(dev, index);
}

static const struct eth_dev_ops bnx2x_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2x_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.xstats_get                   = bnx2x_dev_xstats_get,
	.xstats_get_names             = bnx2x_get_xstats_names,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};

/*
 * dev_ops for virtual function
 */
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2xvf_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.xstats_get                   = bnx2x_dev_xstats_get,
	.xstats_get_names             = bnx2x_get_xstats_names,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};

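/*
 * Common PF/VF initialization: map the BARs, load firmware, attach the
 * adapter, start the periodic slowpath poll (PF) and, for a VF, set up the
 * VF/PF mailbox and bulletin DMA areas before requesting resources.
 */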
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct bnx2x_softc *sc;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	sc = eth_dev->data->dev_private;
	sc->pcie_bus    = pci_dev->addr.bus;
	sc->pcie_device = pci_dev->addr.devid;

	if (is_vf)
		sc->flags = BNX2X_IS_VF_FLAG;

	sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
	sc->devinfo.device_id    = pci_dev->id.device_id;
	sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
	sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

	sc->pcie_func = pci_dev->addr.function;
	sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
	if (is_vf)
		sc->bar[BAR1].base_addr = (void *)
			((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
	else
		sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

	assert(sc->bar[BAR0].base_addr);
	assert(sc->bar[BAR1].base_addr);

	bnx2x_load_firmware(sc);
	assert(sc->firmware);

	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		sc->udp_rss = 1;

	sc->rx_budget = BNX2X_RX_BUDGET;
	sc->hc_rx_ticks = BNX2X_RX_TICKS;
	sc->hc_tx_ticks = BNX2X_TX_TICKS;

	sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;

	sc->pci_dev = pci_dev;
	ret = bnx2x_attach(sc);
	if (ret) {
		PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
		return ret;
	}

	/* schedule periodic poll for slowpath link events */
	if (IS_PF(sc)) {
		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
					bnx2x_periodic_start, (void *)eth_dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "Unable to start periodic"
					 " timer rc %d", ret);
			return -EINVAL;
		}
	}

	eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;

	PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
		    sc->pcie_bus, sc->pcie_device);
	PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
		    sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
	PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
		    PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
	PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
		    eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);

	if (IS_VF(sc)) {
		rte_spinlock_init(&sc->vf2pf_lock);

		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				      &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				      RTE_CACHE_LINE_SIZE);
		if (ret)
			goto out;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
					 sc->vf2pf_mbox_mapping.vaddr;

		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				      &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				      RTE_CACHE_LINE_SIZE);
		if (ret)
			goto out;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
					     sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
					     sc->max_rx_queues);
		if (ret)
			goto out;
	}

	return 0;

out:
	bnx2x_periodic_stop(eth_dev);
	return ret;
}

static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 0);
}

static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 1);
}

static struct rte_pci_driver rte_bnx2x_pmd;
static struct rte_pci_driver rte_bnx2xvf_pmd;

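/* Shared probe routine for both PMDs; dispatch on the driver that matched. */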
static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
	struct rte_pci_device *pci_dev)
{
	if (pci_drv == &rte_bnx2x_pmd)
		return rte_eth_dev_pci_generic_probe(pci_dev,
				sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
	else if (pci_drv == &rte_bnx2xvf_pmd)
		return rte_eth_dev_pci_generic_probe(pci_dev,
				sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
	else
		return -EINVAL;
}

static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_bnx2x_pmd = {
	.id_table = pci_id_bnx2x_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_bnx2x_pci_probe,
	.remove = eth_bnx2x_pci_remove,
};

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_bnx2xvf_pmd = {
	.id_table = pci_id_bnx2xvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_bnx2x_pci_probe,
	.remove = eth_bnx2x_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");

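/* Register the driver log types at startup and default them to NOTICE. */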
RTE_INIT(bnx2x_init_log)
{
	bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
	if (bnx2x_logtype_init >= 0)
		rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
	bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
	if (bnx2x_logtype_driver >= 0)
		rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
}