/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_bus_vmbus.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"
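
/*
 * Transmit and receive offload capabilities advertised for the synthetic
 * (netvsc) path; hn_dev_configure() rejects any requested offload that is
 * not covered by these masks.
 */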
#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
			    DEV_TX_OFFLOAD_TCP_TSO    | \
			    DEV_TX_OFFLOAD_MULTI_SEGS | \
			    DEV_TX_OFFLOAD_VLAN_INSERT)

#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
			    DEV_RX_OFFLOAD_VLAN_STRIP)
int hn_logtype_init;
int hn_logtype_driver;

struct hn_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
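
/*
 * Table of extended statistics exported per queue: each entry maps an
 * xstats name to the offset of the matching counter inside struct hn_stats.
 */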
static const struct hn_xstats_name_off hn_stat_strings[] = {
	{ "good_packets", offsetof(struct hn_stats, packets) },
	{ "good_bytes", offsetof(struct hn_stats, bytes) },
	{ "errors", offsetof(struct hn_stats, errors) },
	{ "ring full", offsetof(struct hn_stats, ring_full) },
	{ "multicast_packets", offsetof(struct hn_stats, multicast) },
	{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
	{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
	{ "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
	{ "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
	{ "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
	{ "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
	{ "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
	{ "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
};
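
/*
 * Allocate an ethdev for a VMBus device.  The primary process allocates a
 * new port and its private data; a secondary process attaches to the port
 * already created by the primary.
 */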
static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
{
	struct rte_eth_dev *eth_dev;
	const char *name;

	name = dev->device.name;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
			return NULL;
		}

		if (private_data_size) {
			eth_dev->data->dev_private =
				rte_zmalloc_socket(name, private_data_size,
						   RTE_CACHE_LINE_SIZE,
						   dev->device.numa_node);
			if (!eth_dev->data->dev_private) {
				PMD_DRV_LOG(NOTICE, "can not allocate driver data");
				rte_eth_dev_release_port(eth_dev);
				return NULL;
			}
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not attach secondary");
			return NULL;
		}
	}

	eth_dev->device = &dev->device;

	/* interrupt is simulated */
	dev->intr_handle.type = RTE_INTR_HANDLE_EXT;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	eth_dev->intr_handle = &dev->intr_handle;

	/* allow ethdev to remove on close */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	return eth_dev;
}
static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
}
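
/*
 * The "latency" device argument is given in microseconds; for example a
 * devargs string such as "latency=20" requests a 20 usec channel interrupt
 * latency.  Internally the value is stored in nanoseconds.
 */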
/* handle "latency=X" from devargs */
static int hn_set_latency(const char *key, const char *value, void *opaque)
{
	struct hn_data *hv = opaque;
	char *endp = NULL;
	unsigned long lat;

	lat = strtoul(value, &endp, 0);

	if (*value == '\0' || *endp != '\0') {
		PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat);

	hv->latency = lat * 1000;	/* usec to nsec */
	return 0;
}
/* Parse device arguments */
static int hn_parse_args(const struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_devargs *devargs = dev->device->devargs;
	static const char * const valid_keys[] = {
		"latency",
		NULL
	};
	struct rte_kvargs *kvlist;
	int ret;

	if (!devargs)
		return 0;

	PMD_INIT_LOG(DEBUG, "device args %s %s",
		     devargs->name, devargs->args);

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (!kvlist) {
		PMD_DRV_LOG(NOTICE, "invalid parameters");
		return -EINVAL;
	}

	ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to process latency arg");

	rte_kvargs_free(kvlist);
	return ret;
}
/* Update link status.
 * Note: the DPDK definition of "wait_to_complete"
 *   means block this call until the link is up,
 *   which is not worth supporting.
 */
static int
hn_dev_link_update(struct rte_eth_dev *dev,
		   int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_link link, old;
	int error;

	old = dev->data->dev_link;

	error = hn_rndis_get_linkstatus(hv);
	if (error)
		return error;

	hn_rndis_get_linkspeed(hv);

	hn_vf_link_update(dev, wait_to_complete);

	link = (struct rte_eth_link) {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_autoneg = ETH_LINK_FIXED,
		.link_speed = hv->link_speed / 10000,
	};

	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	if (old.link_status == link.link_status)
		return 0;

	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
		     (link.link_status == ETH_LINK_UP) ? "up" : "down");

	return rte_eth_linkstatus_set(dev, &link);
}
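
/*
 * Report device information for the synthetic path: fixed 10G speed
 * capability, a single MAC address, Toeplitz RSS parameters and the queue
 * limits discovered at init time; RNDIS offload capabilities and VF
 * information are then merged in.
 */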
static void hn_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info)
{
	struct hn_data *hv = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
	dev_info->max_mac_addrs = 1;

	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
	dev_info->flow_type_rss_offloads =
		ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;

	dev_info->max_rx_queues = hv->max_queues;
	dev_info->max_tx_queues = hv->max_queues;

	hn_rndis_get_offload(hv, dev_info);
	hn_vf_info_get(hv, dev_info);
}
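
/*
 * Receive filter handling: the synthetic path is programmed through RNDIS
 * packet filters, and every filter change is mirrored to the VF when one
 * is attached.
 */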
static void
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
	hn_vf_promiscuous_enable(dev);
}
static void
hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint32_t filter;

	filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
	if (dev->data->all_multicast)
		filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
	hn_rndis_set_rxfilter(hv, filter);
	hn_vf_promiscuous_disable(dev);
}
static void
hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_ALL_MULTICAST |
			      NDIS_PACKET_TYPE_BROADCAST);
	hn_vf_allmulticast_enable(dev);
}
static void
hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_BROADCAST);
	hn_vf_allmulticast_disable(dev);
}
static int
hn_dev_mc_addr_list(struct rte_eth_dev *dev,
		    struct ether_addr *mc_addr_set,
		    uint32_t nb_mc_addr)
{
	/* No filtering on the synthetic path, but can do it on VF */
	return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
/* Setup shared rx/tx queue data */
static int hn_subchan_configure(struct hn_data *hv,
				uint32_t subchan)
{
	struct vmbus_channel *primary = hn_primary_chan(hv);
	int err;
	unsigned int retry = 0;

	PMD_DRV_LOG(DEBUG,
		    "open %u subchannels", subchan);

	/* Send create sub channels command */
	err = hn_nvs_alloc_subchans(hv, &subchan);
	if (err)
		return err;

	while (subchan > 0) {
		struct vmbus_channel *new_sc;
		uint16_t chn_index;

		err = rte_vmbus_subchan_open(primary, &new_sc);
		if (err == -ENOENT && ++retry < 1000) {
			/* This can happen if not ready yet */
			rte_delay_ms(10);
			continue;
		}

		if (err) {
			PMD_DRV_LOG(ERR,
				    "open subchannel failed: %d", err);
			return err;
		}

		rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency);

		chn_index = rte_vmbus_sub_channel_index(new_sc);
		if (chn_index == 0 || chn_index > hv->max_queues) {
			PMD_DRV_LOG(ERR,
				    "Invalid subchannel offermsg channel %u",
				    chn_index);
			return -EIO;
		}

		PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
		hv->channels[chn_index] = new_sc;
		--subchan;
	}

	return err;
}
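
/*
 * Configure the device: validate the requested offloads against the
 * capability masks above, program RNDIS offloads and RSS, and open one
 * VMBus subchannel per additional queue pair.
 */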
static int hn_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
	const struct rte_eth_txmode *txmode = &dev_conf->txmode;
	const struct rte_eth_rss_conf *rss_conf =
		&dev_conf->rx_adv_conf.rss_conf;
	struct hn_data *hv = dev->data->dev_private;
	uint64_t unsupported;
	int err, subchan;

	PMD_INIT_FUNC_TRACE();

	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported TX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported RX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	err = hn_rndis_conf_offload(hv, txmode->offloads,
				    rxmode->offloads);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "offload configure failed");
		return err;
	}

	hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
				 dev->data->nb_tx_queues);
	subchan = hv->num_queues - 1;
	if (subchan > 0) {
		err = hn_subchan_configure(hv, subchan);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "subchannel configuration failed");
			return err;
		}

		err = hn_rndis_conf_rss(hv, rss_conf);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "rss configuration failed");
			return err;
		}
	}

	return hn_vf_configure(dev, dev_conf);
}
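
/*
 * Basic statistics: the VF counters (if any) are merged first, then the
 * per-queue software counters kept by the transmit and receive paths are
 * accumulated into the rte_eth_stats totals and per-queue arrays.
 */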
static int hn_dev_stats_get(struct rte_eth_dev *dev,
			    struct rte_eth_stats *stats)
{
	unsigned int i;

	hn_vf_stats_get(dev, stats);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txq->stats.packets;
			stats->q_obytes[i] = txq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->imissed += rxq->stats.ring_full;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxq->stats.packets;
			stats->q_ibytes[i] = rxq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}
static void
hn_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;
		memset(&txq->stats, 0, sizeof(struct hn_stats));
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		memset(&rxq->stats, 0, sizeof(struct hn_stats));
	}
}
static void
hn_dev_xstats_reset(struct rte_eth_dev *dev)
{
	hn_dev_stats_reset(dev);
	hn_vf_xstats_reset(dev);
}
static int
hn_dev_xstats_count(struct rte_eth_dev *dev)
{
	int ret, count;

	count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings);
	count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);

	ret = hn_vf_xstats_get_names(dev, NULL, 0);
	if (ret < 0)
		return ret;

	return count + ret;
}
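
/*
 * Extended statistics names follow the pattern "tx_q%u_<stat>" and
 * "rx_q%u_<stat>" for each configured queue, with the VF's own xstats
 * names appended after them.
 */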
static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int limit)
{
	unsigned int i, t, count = 0;
	int ret;

	if (!xstats_names)
		return hn_dev_xstats_count(dev);

	/* Note: limit checked in rte_eth_xstats_names() */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, hn_stat_strings[t].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i,
				 hn_stat_strings[t].name);
	}

	ret = hn_vf_xstats_get_names(dev, xstats_names + count,
				     limit - count);
	if (ret < 0)
		return ret;

	return count + ret;
}
static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
		  unsigned int n)
{
	unsigned int i, t, count = 0;
	const unsigned int nstats = hn_dev_xstats_count(dev);
	const char *stats;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats = (const char *)&txq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats = (const char *)&rxq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
	}

	ret = hn_vf_xstats_get(dev, xstats + count, n - count);
	if (ret < 0)
		return ret;

	return count + ret;
}
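
/*
 * Start the device by enabling the receive filter for directed, multicast
 * and broadcast traffic; if starting the VF fails, the filter is cleared
 * again.
 */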
static int
hn_dev_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	int error;

	PMD_INIT_FUNC_TRACE();

	error = hn_rndis_set_rxfilter(hv,
				      NDIS_PACKET_TYPE_BROADCAST |
				      NDIS_PACKET_TYPE_ALL_MULTICAST |
				      NDIS_PACKET_TYPE_DIRECTED);
	if (error)
		return error;

	error = hn_vf_start(dev);
	if (error)
		hn_rndis_set_rxfilter(hv, 0);

	return error;
}
static void
hn_dev_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	hn_rndis_set_rxfilter(hv, 0);
	hn_vf_stop(dev);
}
static void
hn_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	hn_vf_close(dev);
	hn_dev_free_queues(dev);
}
static const struct eth_dev_ops hn_eth_dev_ops = {
	.dev_configure		= hn_dev_configure,
	.dev_start		= hn_dev_start,
	.dev_stop		= hn_dev_stop,
	.dev_close		= hn_dev_close,
	.dev_infos_get		= hn_dev_info_get,
	.dev_supported_ptypes_get = hn_vf_supported_ptypes,
	.promiscuous_enable	= hn_dev_promiscuous_enable,
	.promiscuous_disable	= hn_dev_promiscuous_disable,
	.allmulticast_enable	= hn_dev_allmulticast_enable,
	.allmulticast_disable	= hn_dev_allmulticast_disable,
	.set_mc_addr_list	= hn_dev_mc_addr_list,
	.tx_queue_setup		= hn_dev_tx_queue_setup,
	.tx_queue_release	= hn_dev_tx_queue_release,
	.tx_done_cleanup	= hn_dev_tx_done_cleanup,
	.rx_queue_setup		= hn_dev_rx_queue_setup,
	.rx_queue_release	= hn_dev_rx_queue_release,
	.link_update		= hn_dev_link_update,
	.stats_get		= hn_dev_stats_get,
	.stats_reset		= hn_dev_stats_reset,
	.xstats_get		= hn_dev_xstats_get,
	.xstats_get_names	= hn_dev_xstats_get_names,
	.xstats_reset		= hn_dev_xstats_reset,
};
/*
 * Setup connection between PMD and kernel.
 */
static int
hn_attach(struct hn_data *hv, unsigned int mtu)
{
	int error;

	error = hn_nvs_attach(hv, mtu);
	if (error)
		goto failed_nvs;

	error = hn_rndis_attach(hv);
	if (error)
		goto failed_rndis;

	/*
	 * Under certain conditions on certain versions of Hyper-V,
	 * the RNDIS rxfilter is _not_ zero on the hypervisor side
	 * after the successful RNDIS initialization.
	 */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
	return 0;

failed_rndis:
	hn_nvs_detach(hv);
failed_nvs:
	return error;
}
static void
hn_detach(struct hn_data *hv)
{
	hn_nvs_detach(hv);
	hn_rndis_detach(hv);
}
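
/*
 * Per-port initialization: map the VMBus receive/send buffer resources,
 * open the primary channel, attach NVS and RNDIS, read the MAC address
 * and work out how many queues the host allows; a VF already reported by
 * the host is added at the end.
 */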
static int
eth_hn_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
	struct rte_device *device = eth_dev->device;
	struct rte_vmbus_device *vmbus;
	unsigned int rxr_cnt;
	int err, max_chan;

	PMD_INIT_FUNC_TRACE();

	vmbus = container_of(device, struct rte_vmbus_device, device);
	eth_dev->dev_ops = &hn_eth_dev_ops;
	eth_dev->tx_pkt_burst = &hn_xmit_pkts;
	eth_dev->rx_pkt_burst = &hn_recv_pkts;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Since Hyper-V only supports one MAC address, just use local data */
	eth_dev->data->mac_addrs = &hv->mac_addr;

	hv->vmbus = vmbus;
	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
	hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
	hv->port_id = eth_dev->data->port_id;
	hv->latency = HN_CHAN_LATENCY_NS;
	hv->vf_port = HN_INVALID_PORT;

	err = hn_parse_args(eth_dev);
	if (err)
		return err;

	strlcpy(hv->owner.name, eth_dev->device->name,
		RTE_ETH_MAX_OWNER_NAME_LEN);
	err = rte_eth_dev_owner_new(&hv->owner.id);
	if (err) {
		PMD_INIT_LOG(ERR, "Can not get owner id");
		return err;
	}

	/* Initialize primary channel input for control operations */
	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
	if (err)
		return err;

	rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency);

	hv->primary = hn_rx_queue_alloc(hv, 0,
					eth_dev->device->numa_node);
	if (!hv->primary)
		return -ENOMEM;

	err = hn_attach(hv, ETHER_MTU);
	if (err)
		goto failed;

	err = hn_tx_pool_init(eth_dev);
	if (err)
		goto failed;

	err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
	if (err)
		goto failed;

	/* Multi queue requires later versions of windows server */
	if (hv->nvs_ver < NVS_VERSION_5)
		return 0;

	max_chan = rte_vmbus_max_channels(vmbus);
	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
	if (max_chan <= 0)
		goto failed;

	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
		rxr_cnt = 1;

	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);

	/* If VF was reported but not added, do it now */
	if (hv->vf_present && !hn_vf_attached(hv)) {
		PMD_INIT_LOG(DEBUG, "Adding VF device");

		err = hn_vf_add(eth_dev, hv);
		if (err)
			goto failed;
	}

	return 0;

failed:
	PMD_INIT_LOG(NOTICE, "device init failed");

	hn_tx_pool_uninit(eth_dev);
	hn_detach(hv);
	return err;
}
static int
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	hn_detach(hv);
	hn_tx_pool_uninit(eth_dev);
	rte_vmbus_chan_close(hv->primary->chan);
	rte_free(hv->primary);
	rte_eth_dev_owner_delete(hv->owner.id);

	return 0;
}
static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
			struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
	if (!eth_dev)
		return -ENOMEM;

	ret = eth_hn_dev_init(eth_dev);
	if (ret)
		eth_dev_vmbus_release(eth_dev);
	else
		rte_eth_dev_probing_finish(eth_dev);

	return ret;
}
static int eth_hn_remove(struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = rte_eth_dev_allocated(dev->device.name);
	if (!eth_dev)
		return -ENODEV;

	ret = eth_hn_dev_uninit(eth_dev);
	if (ret)
		return ret;

	eth_dev_vmbus_release(eth_dev);
	return 0;
}
/* Network device GUID */
static const rte_uuid_t hn_net_ids[] = {
	/*  f8615163-df3e-46c5-913f-f2d2f965ed0e */
	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
};

static struct rte_vmbus_driver rte_netvsc_pmd = {
	.id_table = hn_net_ids,
	.probe = eth_hn_probe,
	.remove = eth_hn_remove,
};

RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");

RTE_INIT(hn_init_log)
{
	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
	if (hn_logtype_init >= 0)
		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
	if (hn_logtype_driver >= 0)
		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
}