/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"

#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)

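/* Server mode only: accept a new vhost-user connection on the listen fd,
 * re-negotiate features and restart the device, then move the LSC interrupt
 * handler from the listen fd to the new connection fd.
 */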
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
	int ret;
	int connectfd;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	connectfd = accept(dev->listenfd, NULL, NULL);
	if (connectfd < 0)
		return -1;

	dev->vhostfd = connectfd;
	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
				   &dev->device_features) < 0) {
		PMD_INIT_LOG(ERR, "get_features failed: %s",
			     strerror(errno));
		return -1;
	}

	dev->device_features |= dev->frontend_features;

	/* mask out vhost-user unsupported features */
	dev->device_features &= ~(dev->unsupported_features);

	dev->features &= dev->device_features;

	ret = virtio_user_start_device(dev);
	if (ret < 0)
		return -1;

	if (dev->queue_pairs > 1) {
		ret = virtio_user_handle_mq(dev, dev->queue_pairs);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
			return -1;
		}
	}
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		if (rte_intr_disable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt disable failed");
			return -1;
		}
		rte_intr_callback_unregister(eth_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
		eth_dev->intr_handle->fd = connectfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -1;
		}
	}
	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
	return 0;
}

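/* Deferred from the interrupt context via rte_eal_alarm_set(): the LSC
 * callback cannot be unregistered from within the handler itself, so it is
 * done here. In server mode the dead connection is also closed and the
 * interrupt handler is re-armed on the listen fd to wait for a reconnect.
 */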
static void
virtio_user_delayed_handler(void *param)
{
	struct virtio_hw *hw = (struct virtio_hw *)param;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (rte_intr_disable(eth_dev->intr_handle) < 0) {
		PMD_DRV_LOG(ERR, "interrupt disable failed");
		return;
	}
	rte_intr_callback_unregister(eth_dev->intr_handle,
				     virtio_interrupt_handler, eth_dev);
	if (dev->is_server) {
		if (dev->vhostfd >= 0) {
			close(dev->vhostfd);
			dev->vhostfd = -1;
		}
		eth_dev->intr_handle->fd = dev->listenfd;
		rte_intr_callback_register(eth_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);
		if (rte_intr_enable(eth_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return;
		}
	}
}

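/* Emulate config-space reads on top of the vhost backend: the MAC and the
 * number of queue pairs come from the virtio_user_dev, and a read of the
 * status field doubles as a liveness probe of the vhost-user connection.
 */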
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
		     void *dst, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (offset == offsetof(struct virtio_net_config, mac) &&
	    length == ETHER_ADDR_LEN) {
		for (i = 0; i < ETHER_ADDR_LEN; ++i)
			((uint8_t *)dst)[i] = dev->mac_addr[i];
		return;
	}

	if (offset == offsetof(struct virtio_net_config, status)) {
		char buf[128];

		if (dev->vhostfd >= 0) {
			int r;
			int flags;

			flags = fcntl(dev->vhostfd, F_GETFL);
			if (fcntl(dev->vhostfd, F_SETFL,
					flags | O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
				return;
			}
			r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
			if (r == 0 || (r < 0 && errno != EAGAIN)) {
				dev->status &= (~VIRTIO_NET_S_LINK_UP);
				PMD_DRV_LOG(ERR, "virtio-user port %u is down",
					    hw->port_id);

				/* This function could be called in the process
				 * of interrupt handling, callback cannot be
				 * unregistered here, set an alarm to do it.
				 */
				rte_eal_alarm_set(1,
						  virtio_user_delayed_handler,
						  (void *)hw);
			} else {
				dev->status |= VIRTIO_NET_S_LINK_UP;
			}
			if (fcntl(dev->vhostfd, F_SETFL,
					flags & ~O_NONBLOCK) == -1) {
				PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
				return;
			}
		} else if (dev->is_server) {
			dev->status &= (~VIRTIO_NET_S_LINK_UP);
			if (virtio_user_server_reconnect(dev) >= 0)
				dev->status |= VIRTIO_NET_S_LINK_UP;
		}

		*(uint16_t *)dst = dev->status;
	}

	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
		*(uint16_t *)dst = dev->max_queue_pairs;
}

static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
		      const void *src, int length)
{
	int i;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if ((offset == offsetof(struct virtio_net_config, mac)) &&
	    (length == ETHER_ADDR_LEN))
		for (i = 0; i < ETHER_ADDR_LEN; ++i)
			dev->mac_addr[i] = ((const uint8_t *)src)[i];
	else
		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
			    offset, length);
}

static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_stop_device(dev);
}

static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		virtio_user_start_device(dev);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		virtio_user_reset(hw);
	dev->status = status;
}

static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	return dev->status;
}

static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* unmask feature bits defined in vhost user protocol */
	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}

static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	dev->features = features & dev->device_features;
}

static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
	/* rxq interrupts and config interrupt are separated in virtio-user,
	 * here we only report config change.
	 */
	return VIRTIO_PCI_ISR_CONFIG;
}

static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
		    uint16_t vec __rte_unused)
{
	return 0;
}

static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
			  struct virtqueue *vq __rte_unused,
			  uint16_t vec __rte_unused)
{
	/* pretend we have done that */
	return 0;
}

/* This function is to get the queue size, aka, number of descs, of a specified
 * queue. Different from VHOST_USER_GET_QUEUE_NUM, which is used to get the
 * max number of supported queues.
 */
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	/* Currently, each queue has the same queue size */
	return dev->queue_size;
}

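/* Lay out a packed virtqueue in the ring memory allocated by the generic
 * virtio code, and record the descriptor area plus the driver and device
 * event areas so they can be handed to the backend.
 */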
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
			       struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	struct vring_packed *vring;
	uint64_t desc_addr;
	uint64_t avail_addr;
	uint64_t used_addr;
	uint16_t i;

	vring = &dev->packed_vrings[queue_idx];
	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries *
		sizeof(struct vring_packed_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr +
			   sizeof(struct vring_packed_desc_event),
			   VIRTIO_PCI_VRING_ALIGN);
	vring->num = vq->vq_nentries;
	vring->desc = (void *)(uintptr_t)desc_addr;
	vring->driver = (void *)(uintptr_t)avail_addr;
	vring->device = (void *)(uintptr_t)used_addr;
	dev->packed_queues[queue_idx].avail_wrap_counter = true;
	dev->packed_queues[queue_idx].used_wrap_counter = true;

	for (i = 0; i < vring->num; i++)
		vring->desc[i].flags = 0;
}

static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
	uint16_t queue_idx = vq->vq_queue_index;
	uint64_t desc_addr, avail_addr, used_addr;

	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	dev->vrings[queue_idx].num = vq->vq_nentries;
	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
}

static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (vtpci_packed_queue(hw))
		virtio_user_setup_queue_packed(vq, dev);
	else
		virtio_user_setup_queue_split(vq, dev);

	return 0;
}

static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	/* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port; QEMU
	 * correspondingly stops the ioeventfds and resets the status of
	 * the device.
	 * For modern devices, setting queue desc, avail and used addresses
	 * in the PCI bar to 0 triggers no further behavior in QEMU.
	 *
	 * Here we only care about what information to deliver to vhost-user
	 * or vhost-kernel, so we just close the ioeventfds for now.
	 */
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	close(dev->callfds[vq->vq_queue_index]);
	close(dev->kickfds[vq->vq_queue_index]);
}

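/* Kick the backend for a given queue. Control-queue commands are handled
 * locally rather than forwarded, since the vhost backend has no control
 * virtqueue; data queues are kicked through their eventfd.
 */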
static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t buf = 1;
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	if (hw->cvq && (hw->cvq->vq == vq)) {
		if (vtpci_packed_queue(vq->hw))
			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
		else
			virtio_user_handle_cq(dev, vq->vq_queue_index);
		return;
	}

	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
			    strerror(errno));
}

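/* virtio PCI ops implemented on top of the vhost backend instead of a real
 * PCI device; installed per port in virtio_user_eth_dev_alloc().
 */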
const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.set_queue_irq	= virtio_user_set_queue_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};

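/* Devargs accepted by the virtio_user vdev; each key macro is defined next
 * to its table entry so the table and the macros stay in sync.
 */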
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
	VIRTIO_USER_ARG_INTERFACE_NAME,
#define VIRTIO_USER_ARG_SERVER_MODE    "server"
	VIRTIO_USER_ARG_SERVER_MODE,
#define VIRTIO_USER_ARG_MRG_RXBUF      "mrg_rxbuf"
	VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER       "in_order"
	VIRTIO_USER_ARG_IN_ORDER,
#define VIRTIO_USER_ARG_PACKED_VQ      "packed_vq"
	VIRTIO_USER_ARG_PACKED_VQ,
	NULL
};

#define VIRTIO_USER_DEF_CQ_EN		0
#define VIRTIO_USER_DEF_Q_NUM		1
#define VIRTIO_USER_DEF_Q_SZ		256
#define VIRTIO_USER_DEF_SERVER_MODE	0

static int
get_string_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}

static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint64_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}

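/* Allocate the ethdev and its virtio_user_dev private data for a vdev, and
 * hook the port up to the virtio_user_ops vtpci backend.
 */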
static struct rte_vdev_driver virtio_user_driver;

static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct rte_eth_dev_data *data;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
		return NULL;
	}

	data = eth_dev->data;
	hw = eth_dev->data->dev_private;

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev) {
		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
		rte_eth_dev_release_port(eth_dev);
		return NULL;
	}

	hw->port_id = data->port_id;
	dev->port_id = data->port_id;
	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
	/*
	 * MSIX is required to enable LSC (see virtio_init_device).
	 * Here just pretend that we support msix.
	 */
	hw->use_msix = 1;
	hw->modern = 0;
	hw->use_simple_rx = 0;
	hw->use_inorder_rx = 0;
	hw->use_inorder_tx = 0;
	hw->virtio_user_dev = dev;
	return eth_dev;
}

static void
virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct virtio_hw *hw = data->dev_private;

	rte_free(hw->virtio_user_dev);
	rte_eth_dev_release_port(eth_dev);
}

/* Dev initialization routine. Invoked once for each virtio vdev at
 * EAL init time, see rte_bus_probe().
 * Returns 0 on success.
 */
static int
virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
	uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
	uint64_t mrg_rxbuf = 1;
	uint64_t in_order = 1;
	uint64_t packed_vq = 0;
	char *path = NULL;
	char *ifname = NULL;
	char *mac_addr = NULL;
	int ret = -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const char *name = rte_vdev_device_name(dev);
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			RTE_LOG(ERR, PMD, "Failed to probe %s\n", name);
			return -1;
		}

		if (eth_virtio_dev_init(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
			rte_eth_dev_release_port(eth_dev);
			return -1;
		}

		eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
	if (!kvlist) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
				       &get_string_arg, &path) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PATH);
			goto end;
		}
	} else {
		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
			     VIRTIO_USER_ARG_PATH);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
		if (is_vhost_user_by_type(path)) {
			PMD_INIT_LOG(ERR,
				"arg %s applies only to vhost-kernel backend",
				VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}

		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
				       &get_string_arg, &ifname) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_INTERFACE_NAME);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
				       &get_string_arg, &mac_addr) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MAC);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
				       &get_integer_arg, &queue_size) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUE_SIZE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
				       &get_integer_arg, &queues) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_QUEUES_NUM);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
				       &get_integer_arg, &server_mode) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_SERVER_MODE);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
				       &get_integer_arg, &cq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_CQ_NUM);
			goto end;
		}
	} else if (queues > 1) {
		cq = 1;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
				       &get_integer_arg, &packed_vq) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_PACKED_VQ);
			goto end;
		}
	}

	if (queues > 1 && cq == 0) {
		PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
		goto end;
	}

	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
			VIRTIO_USER_ARG_QUEUES_NUM, queues,
			VIRTIO_MAX_VIRTQUEUE_PAIRS);
		goto end;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
				       &get_integer_arg, &mrg_rxbuf) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_MRG_RXBUF);
			goto end;
		}
	}

	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
				       &get_integer_arg, &in_order) < 0) {
			PMD_INIT_LOG(ERR, "error to parse %s",
				     VIRTIO_USER_ARG_IN_ORDER);
			goto end;
		}
	}

	eth_dev = virtio_user_eth_dev_alloc(dev);
	if (!eth_dev) {
		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
		goto end;
	}

	hw = eth_dev->data->dev_private;
	if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
			 queue_size, mac_addr, &ifname, server_mode,
			 mrg_rxbuf, in_order, packed_vq) < 0) {
		PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	/* previously called by rte_pci_probe() for physical dev */
	if (eth_virtio_dev_init(eth_dev) < 0) {
		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
		virtio_user_eth_dev_free(eth_dev);
		goto end;
	}

	rte_eth_dev_probing_finish(eth_dev);
	ret = 0;

end:
	if (kvlist)
		rte_kvargs_free(kvlist);
	if (path)
		free(path);
	if (mac_addr)
		free(mac_addr);
	if (ifname)
		free(ifname);
	return ret;
}

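/* Tear down a virtio_user port: stop and close the ethdev, uninitialize the
 * vhost backend, and release the port. Secondary processes only release
 * their local port reference.
 */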
static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev;
	struct virtio_hw *hw;
	struct virtio_user_dev *dev;

	if (!vdev)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return -ENODEV;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	/* make sure the device is stopped, queues freed */
	rte_eth_dev_close(eth_dev->data->port_id);

	hw = eth_dev->data->dev_private;
	dev = hw->virtio_user_dev;
	virtio_user_dev_uninit(dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver virtio_user_driver = {
	.probe = virtio_user_pmd_probe,
	.remove = virtio_user_pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int> "
	"iface=<string> "
	"server=<0|1> "
	"mrg_rxbuf=<0|1> "
	"in_order=<0|1> "
	"packed_vq=<0|1>");