/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/types.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
#endif
#include <linux/binfmts.h>
#include <xen/xen-compat.h>
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
#include <xs.h>
#else
#include <xenstore.h>
#endif
#include <linux/virtio_ring.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_vdev.h>
#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>

#include "rte_xen_lib.h"
#include "virtqueue.h"
#include "rte_eth_xenvirt.h"
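/*
 * Each virtqueue is created with a fixed ring of VQ_DESC_NUM descriptors;
 * the RX and TX burst handlers below process at most VIRTIO_MBUF_BURST_SZ
 * packets per call.
 */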
#define VQ_DESC_NUM 256
#define VIRTIO_MBUF_BURST_SZ 64
/* virtio_idx is increased after a new device is created. */
static int virtio_idx = 0;

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_FIXED
};
static void
eth_xenvirt_free_queues(struct rte_eth_dev *dev);
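/*
 * RX burst: dequeue up to nb_pkts used buffers from the shared ring,
 * strip the virtio net header, then refill the ring with fresh mbufs.
 */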
static uint16_t
eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *rxvq = q;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num;
	uint32_t i;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct pmd_internals *pi = rxvq->internals;

	nb_used = VIRTQUEUE_NUSED(rxvq);

	/* Clamp the burst to both the caller's request and the local cap. */
	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
	if (unlikely(num == 0))
		return 0;

	num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
	for (i = 0; i < num; i++) {
		rxm = rx_pkts[i];
		PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
		rxm->port = pi->port_id;
		rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
	}
	/* Allocate new mbufs for the used descriptors. */
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL))
			break;
		if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
			rte_pktmbuf_free_seg(new_mbuf);
			break;
		}
	}
	pi->eth_stats.ipackets += num;

	return num;
}
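/*
 * TX burst: first reclaim completed buffers from the used ring, then
 * enqueue up to nb_pkts single-segment mbufs for the host to consume.
 */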
static uint16_t
eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num, i;
	int error;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
	struct pmd_internals *pi = txvq->internals;

	nb_tx = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(txvq);

	/* Free completed transmit buffers before enqueueing new ones. */
	num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
	num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);

	for (i = 0; i < num; i++) {
		/* mergeable buffers not supported, one segment only */
		rte_pktmbuf_free_seg(snd_pkts[i]);
	}

	while (nb_tx < nb_pkts) {
		if (likely(!virtqueue_full(txvq))) {
			/* TODO drop tx_pkts if it contains multiple segments */
			txm = tx_pkts[nb_tx];
			error = virtqueue_enqueue_xmit(txvq, txm);
			if (unlikely(error)) {
				if (error == ENOSPC)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
				else if (error == EMSGSIZE)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
				else
					PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
				break;
			}
			nb_tx++;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
			break;
		}
	}
	/* virtqueue_notify not needed in our para-virt solution */
	pi->eth_stats.opackets += nb_tx;

	return nb_tx;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	RTE_LOG(ERR, PMD, "%s\n", __func__);
	return 0;
}
/*
 * Create a shared page between guest and host.
 * The host watches this page: when it is cleared on unmap, the host
 * performs the necessary cleanup.
 */
static void
gntalloc_vring_flag(int vtidx)
{
	char key_str[PATH_MAX];
	char val_str[PATH_MAX];
	uint32_t gref_tmp;
	void *ptr;

	if (grefwatch_from_alloc(&gref_tmp, &ptr)) {
		RTE_LOG(ERR, PMD, "grefwatch_from_alloc error\n");
		return;
	}

	*(uint8_t *)ptr = MAP_FLAG;
	snprintf(val_str, sizeof(val_str), "%u", gref_tmp);
	snprintf(key_str, sizeof(key_str),
		DPDK_XENSTORE_PATH"%d"VRING_FLAG_STR, vtidx);
	xenstore_write(key_str, val_str);
}
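/*
 * All xenstore keys written by this driver have the form
 * DPDK_XENSTORE_PATH<virtio_idx><suffix>, so the host side can watch
 * one subtree per virtio device.
 */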
/*
 * Notify the host that this virtio device is started,
 * so the host can start polling it.
 */
static void
dev_start_notify(int vtidx)
{
	char key_str[PATH_MAX];
	char val_str[PATH_MAX];

	RTE_LOG(INFO, PMD, "%s: virtio %d is started\n", __func__, vtidx);
	gntalloc_vring_flag(vtidx);

	snprintf(key_str, sizeof(key_str), "%s%s%d",
		DPDK_XENSTORE_PATH, EVENT_TYPE_START_STR,
		vtidx);
	snprintf(val_str, sizeof(val_str), "1");
	xenstore_write(key_str, val_str);
}
/*
 * Notify the host that this virtio device is stopped,
 * so the host can stop polling it.
 */
static void
dev_stop_notify(int vtidx)
{
	RTE_SET_USED(vtidx);
}
static int
update_mac_address(struct ether_addr *mac_addrs, int vtidx)
{
	char key_str[PATH_MAX];
	char val_str[PATH_MAX];
	int rv = -1;

	if (mac_addrs == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer mac specified\n", __func__);
		goto out;
	}

	rv = snprintf(key_str, sizeof(key_str),
		DPDK_XENSTORE_PATH"%d_ether_addr", vtidx);
	if (rv == -1)
		goto out;

	rv = snprintf(val_str, sizeof(val_str), "%02x:%02x:%02x:%02x:%02x:%02x",
			mac_addrs->addr_bytes[0],
			mac_addrs->addr_bytes[1],
			mac_addrs->addr_bytes[2],
			mac_addrs->addr_bytes[3],
			mac_addrs->addr_bytes[4],
			mac_addrs->addr_bytes[5]);
	if (rv == -1)
		goto out;

	if (xenstore_write(key_str, val_str))
		rv = -1;
out:
	return rv;
}
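/*
 * Start the device: mark the link up, pre-fill the RX ring with mbufs,
 * publish the MAC address through xenstore, then tell the host to begin
 * polling this virtio device.
 */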
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	struct virtqueue *rxvq = dev->data->rx_queues[0];
	struct virtqueue *txvq = dev->data->tx_queues[0];
	struct rte_mbuf *m;
	struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
	int rv;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	while (!virtqueue_full(rxvq)) {
		m = rte_mbuf_raw_alloc(rxvq->mpool);
		if (m == NULL)
			break;
		/* Enqueue allocated buffers. */
		if (virtqueue_enqueue_recv_refill(rxvq, m)) {
			rte_pktmbuf_free_seg(m);
			break;
		}
	}

	rxvq->internals = pi;
	txvq->internals = pi;

	rv = update_mac_address(dev->data->mac_addrs, pi->virtio_idx);
	if (rv)
		return -1;
	dev_start_notify(pi->virtio_idx);

	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	dev_stop_notify(pi->virtio_idx);
}
/*
 * Notify the host that this virtio device is closed.
 * The host can then do the necessary cleanup for this device.
 */
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	eth_xenvirt_free_queues(dev);
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	RTE_SET_USED(internals);
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)2048;
	dev_info->max_rx_queues = (uint16_t)1;
	dev_info->max_tx_queues = (uint16_t)1;
	dev_info->min_rx_bufsize = 0;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;

	rte_memcpy(stats, &internals->eth_stats, sizeof(*stats));
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	/* Reset software totals. */
	memset(&internals->eth_stats, 0, sizeof(internals->eth_stats));
}
static void
eth_queue_release(void *q)
{
	rte_free(q);
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
/*
 * Create a shared vring between guest and host.
 * Memory is allocated through the grant alloc driver, so it is not
 * physically contiguous.
 */
static void *
gntalloc_vring_create(int queue_type, uint32_t size, int vtidx)
{
	char key_str[PATH_MAX] = {0};
	char val_str[PATH_MAX] = {0};
	void *va = NULL;
	int pg_size;
	uint32_t pg_num;
	uint32_t *gref_arr = NULL;
	phys_addr_t *pa_arr = NULL;
	uint64_t start_index;
	int rv;

	pg_size = getpagesize();
	size = RTE_ALIGN_CEIL(size, pg_size);
	pg_num = size / pg_size;

	gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
	pa_arr = calloc(pg_num, sizeof(pa_arr[0]));

	if (gref_arr == NULL || pa_arr == NULL) {
		RTE_LOG(ERR, PMD, "%s: calloc failed\n", __func__);
		goto out;
	}

	va = gntalloc(size, gref_arr, &start_index);
	if (va == NULL) {
		RTE_LOG(ERR, PMD, "%s: gntalloc failed\n", __func__);
		goto out;
	}

	if (get_phys_map(va, pa_arr, pg_num, pg_size)) {
		gntfree(va, size, start_index);
		va = NULL;
		goto out;
	}

	/* Write the gref and pfn of each vring page into xenstore. */
	if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
		gntfree(va, size, start_index);
		va = NULL;
		goto out;
	}

	if (queue_type == VTNET_RQ)
		rv = snprintf(key_str, sizeof(key_str),
			DPDK_XENSTORE_PATH"%d"RXVRING_XENSTORE_STR, vtidx);
	else
		rv = snprintf(key_str, sizeof(key_str),
			DPDK_XENSTORE_PATH"%d"TXVRING_XENSTORE_STR, vtidx);
	if (rv == -1 || xenstore_write(key_str, val_str) == -1) {
		gntfree(va, size, start_index);
		va = NULL;
	}
out:
	free(pa_arr);
	free(gref_arr);

	return va;
}
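/*
 * Allocate and initialize a virtqueue: the control structure comes from
 * rte_zmalloc, while the vring itself is backed by grant pages shared
 * with the host.
 */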
static struct virtqueue *
virtio_queue_setup(struct rte_eth_dev *dev, int queue_type)
{
	struct virtqueue *vq = NULL;
	uint16_t vq_size = VQ_DESC_NUM;
	int i;
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	size_t size;
	struct vring *vr;

	/* Allocate memory for the virtqueue. */
	if (queue_type == VTNET_RQ) {
		snprintf(vq_name, sizeof(vq_name), "port%d_rvq",
			dev->data->port_id);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
		if (vq == NULL) {
			RTE_LOG(ERR, PMD, "%s: unable to allocate virtqueue\n", __func__);
			return NULL;
		}
	} else if (queue_type == VTNET_TQ) {
		snprintf(vq_name, sizeof(vq_name), "port%d_tvq",
			dev->data->port_id);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
		if (vq == NULL) {
			RTE_LOG(ERR, PMD, "%s: unable to allocate virtqueue\n", __func__);
			return NULL;
		}
	} else {
		return NULL;
	}
	memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));

	vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
	vq->vq_nentries = vq_size;
	vq->vq_free_cnt = vq_size;
	/* Calculate the vring size according to the virtio spec. */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	/* Allocate memory for the virtio vring through the gntalloc driver. */
	vq->vq_ring_virt_mem = gntalloc_vring_create(queue_type, vq->vq_ring_size,
		((struct pmd_internals *)dev->data->dev_private)->virtio_idx);
	if (vq->vq_ring_virt_mem == NULL) {
		rte_free(vq);
		return NULL;
	}
	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
	vr = &vq->vq_ring;
	vring_init(vr, vq_size, vq->vq_ring_virt_mem, vq->vq_alignment);

	/*
	 * Locally maintained last consumed index; this index trails
	 * vring->used->idx.
	 */
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	/* Chain all the descriptors in the ring with an END marker. */
	for (i = 0; i < vq_size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	return vq;
}
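/*
 * The PMD exposes exactly one RX and one TX queue (see eth_dev_info);
 * queue setup therefore just creates the matching virtqueue and, for RX,
 * attaches the mempool used to refill the ring.
 */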
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct virtqueue *vq;

	vq = dev->data->rx_queues[rx_queue_id] = virtio_queue_setup(dev, VTNET_RQ);
	if (vq == NULL)
		return -1;
	vq->mpool = mb_pool;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	dev->data->tx_queues[tx_queue_id] = virtio_queue_setup(dev, VTNET_TQ);
	if (dev->data->tx_queues[tx_queue_id] == NULL)
		return -1;

	return 0;
}
static void
eth_xenvirt_free_queues(struct rte_eth_dev *dev)
{
	int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
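/*
 * Parse the devargs string into a xenvirt_dict. The only key currently
 * understood is RTE_ETH_XENVIRT_MAC_PARAM, e.g. "mac=00:11:22:33:44:55";
 * key/value pairs are separated by RTE_ETH_XENVIRT_PAIRS_DELIM.
 */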
static int
rte_eth_xenvirt_parse_args(struct xenvirt_dict *dict,
			const char *name, const char *params)
{
	int i;
	char *pairs[RTE_ETH_XENVIRT_MAX_ARGS];
	int num_of_pairs;
	char *pair[2];
	char *args;
	int ret = -1;

	if (params == NULL)
		return 0;

	args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE);
	if (args == NULL) {
		RTE_LOG(ERR, PMD, "Couldn't parse %s device\n", name);
		return -1;
	}
	rte_memcpy(args, params, strlen(params));

	num_of_pairs = rte_strsplit(args, strnlen(args, MAX_ARG_STRLEN),
			pairs,
			RTE_ETH_XENVIRT_MAX_ARGS,
			RTE_ETH_XENVIRT_PAIRS_DELIM);

	for (i = 0; i < num_of_pairs; i++) {
		pair[0] = NULL;
		pair[1] = NULL;
		rte_strsplit(pairs[i], strnlen(pairs[i], MAX_ARG_STRLEN),
			pair, 2,
			RTE_ETH_XENVIRT_KEY_VALUE_DELIM);

		if (pair[0] == NULL || pair[1] == NULL || pair[0][0] == 0
				|| pair[1][0] == 0) {
			RTE_LOG(ERR, PMD,
				"Couldn't parse %s device, "
				"wrong key or value\n", name);
			goto err;
		}

		if (!strncmp(pair[0], RTE_ETH_XENVIRT_MAC_PARAM,
				sizeof(RTE_ETH_XENVIRT_MAC_PARAM))) {
			if (cmdline_parse_etheraddr(NULL,
					pair[1],
					&dict->addr,
					sizeof(dict->addr)) < 0) {
				RTE_LOG(ERR, PMD,
					"Invalid %s device ether address\n",
					name);
				goto err;
			}

			dict->addr_valid = 1;
		}
	}

	ret = 0;
err:
	rte_free(args);
	return ret;
}
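/*
 * Forward declaration: eth_dev_xenvirt_create() references the driver
 * name before the driver itself is defined at the bottom of this file.
 */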
static struct rte_vdev_driver pmd_xenvirt_drv;
static int
eth_dev_xenvirt_create(const char *name, const char *params,
		const unsigned numa_node,
		enum dev_action action)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct xenvirt_dict dict;

	memset(&dict, 0, sizeof(struct xenvirt_dict));

	RTE_LOG(INFO, PMD, "Creating virtio rings backed ethdev on numa socket %u\n",
		numa_node);
	RTE_SET_USED(action);

	if (rte_eth_xenvirt_parse_args(&dict, name, params) < 0) {
		RTE_LOG(ERR, PMD, "%s: Failed to parse ethdev parameters\n", __func__);
		return -1;
	}

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto err;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)1;
	data->nb_tx_queues = (uint16_t)1;
	data->dev_link = pmd_link;
	data->mac_addrs = rte_zmalloc("xen_virtio", ETHER_ADDR_LEN, 0);
	if (data->mac_addrs == NULL)
		goto err;

	if (dict.addr_valid)
		memcpy(&data->mac_addrs->addr_bytes, &dict.addr,
			sizeof(struct ether_addr));
	else
		eth_random_addr(&data->mac_addrs->addr_bytes[0]);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->drv_name = pmd_xenvirt_drv.driver.name;
	eth_dev->data->numa_node = numa_node;

	eth_dev->rx_pkt_burst = eth_xenvirt_rx;
	eth_dev->tx_pkt_burst = eth_xenvirt_tx;

	internals->virtio_idx = virtio_idx++;
	internals->port_id = eth_dev->data->port_id;

	return 0;

err:
	rte_free(data);
	rte_free(internals);

	return -1;
}
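/*
 * Tear down an ethdev created above. Note that the private data and MAC
 * array hang off eth_dev->data, so they must be freed before the data
 * block itself.
 */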
static int
eth_dev_xenvirt_free(const char *name, const unsigned numa_node)
{
	struct rte_eth_dev *eth_dev = NULL;

	RTE_LOG(DEBUG, PMD,
		"Free virtio rings backed ethdev on numa socket %u\n",
		numa_node);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	if (eth_dev->data->dev_started == 1) {
		eth_dev_stop(eth_dev);
		eth_dev_close(eth_dev);
	}

	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->dev_ops = NULL;

	/* Free the members of eth_dev->data before the data block itself. */
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data->mac_addrs);
	rte_free(eth_dev->data);

	return 0;
}
/* TODO: Support multiple process model */
static int
rte_pmd_xenvirt_probe(struct rte_vdev_device *dev)
{
	if (virtio_idx == 0) {
		if (xenstore_init() != 0) {
			RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
			return -1;
		}
		if (gntalloc_open() != 0) {
			RTE_LOG(ERR, PMD, "%s: grant init failed\n", __func__);
			return -1;
		}
	}
	eth_dev_xenvirt_create(rte_vdev_device_name(dev),
		rte_vdev_device_args(dev), rte_socket_id(), DEV_CREATE);

	return 0;
}
static int
rte_pmd_xenvirt_remove(struct rte_vdev_device *dev)
{
	eth_dev_xenvirt_free(rte_vdev_device_name(dev), rte_socket_id());

	if (virtio_idx == 0) {
		if (xenstore_uninit() != 0)
			RTE_LOG(ERR, PMD, "%s: xenstore uninit failed\n", __func__);
		gntalloc_close();
	}

	return 0;
}
static struct rte_vdev_driver pmd_xenvirt_drv = {
	.probe = rte_pmd_xenvirt_probe,
	.remove = rte_pmd_xenvirt_remove,
};

RTE_PMD_REGISTER_VDEV(net_xenvirt, pmd_xenvirt_drv);
RTE_PMD_REGISTER_ALIAS(net_xenvirt, eth_xenvirt);
RTE_PMD_REGISTER_PARAM_STRING(net_xenvirt,
	"mac=<mac addr>");
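/*
 * Usage sketch (illustrative, not part of this file): the device is
 * instantiated from the EAL command line through the vdev mechanism, e.g.
 *
 *   testpmd -l 0-1 --vdev='net_xenvirt0,mac=00:11:22:33:44:55'
 *
 * The "mac" key is optional; without it, eth_dev_xenvirt_create()
 * generates a random address via eth_random_addr().
 */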