4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>

#include <rte_malloc.h>
#include <rte_kvargs.h>

#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
/* Cast the opaque backend pointer stored in virtio_hw back to the
 * virtio-user device this driver manages.
 */
#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)
53 virtio_user_read_dev_config(struct virtio_hw
*hw
, size_t offset
,
54 void *dst
, int length
)
57 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
59 if (offset
== offsetof(struct virtio_net_config
, mac
) &&
60 length
== ETHER_ADDR_LEN
) {
61 for (i
= 0; i
< ETHER_ADDR_LEN
; ++i
)
62 ((uint8_t *)dst
)[i
] = dev
->mac_addr
[i
];
66 if (offset
== offsetof(struct virtio_net_config
, status
))
67 *(uint16_t *)dst
= dev
->status
;
69 if (offset
== offsetof(struct virtio_net_config
, max_virtqueue_pairs
))
70 *(uint16_t *)dst
= dev
->max_queue_pairs
;
74 virtio_user_write_dev_config(struct virtio_hw
*hw
, size_t offset
,
75 const void *src
, int length
)
78 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
80 if ((offset
== offsetof(struct virtio_net_config
, mac
)) &&
81 (length
== ETHER_ADDR_LEN
))
82 for (i
= 0; i
< ETHER_ADDR_LEN
; ++i
)
83 dev
->mac_addr
[i
] = ((const uint8_t *)src
)[i
];
85 PMD_DRV_LOG(ERR
, "not supported offset=%zu, len=%d\n",
90 virtio_user_set_status(struct virtio_hw
*hw
, uint8_t status
)
92 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
94 if (status
& VIRTIO_CONFIG_STATUS_DRIVER_OK
)
95 virtio_user_start_device(dev
);
/* Device reset hook: stop the vhost backend. */
static void
virtio_user_reset(struct virtio_hw *hw)
{
	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

	virtio_user_stop_device(dev);
}
108 virtio_user_get_status(struct virtio_hw
*hw
)
110 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
116 virtio_user_get_features(struct virtio_hw
*hw
)
118 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
120 return dev
->features
;
124 virtio_user_set_features(struct virtio_hw
*hw
, uint64_t features
)
126 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
128 dev
->features
= features
;
132 virtio_user_get_isr(struct virtio_hw
*hw __rte_unused
)
134 /* When config interrupt happens, driver calls this function to query
135 * what kinds of change happen. Interrupt mode not supported for now.
141 virtio_user_set_config_irq(struct virtio_hw
*hw __rte_unused
,
142 uint16_t vec __rte_unused
)
144 return VIRTIO_MSI_NO_VECTOR
;
147 /* This function is to get the queue size, aka, number of descs, of a specified
148 * queue. Different with the VHOST_USER_GET_QUEUE_NUM, which is used to get the
149 * max supported queues.
152 virtio_user_get_queue_num(struct virtio_hw
*hw
, uint16_t queue_id __rte_unused
)
154 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
156 /* Currently, each queue has same queue size */
157 return dev
->queue_size
;
161 virtio_user_setup_queue(struct virtio_hw
*hw
, struct virtqueue
*vq
)
163 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
164 uint16_t queue_idx
= vq
->vq_queue_index
;
165 uint64_t desc_addr
, avail_addr
, used_addr
;
167 desc_addr
= (uintptr_t)vq
->vq_ring_virt_mem
;
168 avail_addr
= desc_addr
+ vq
->vq_nentries
* sizeof(struct vring_desc
);
169 used_addr
= RTE_ALIGN_CEIL(avail_addr
+ offsetof(struct vring_avail
,
170 ring
[vq
->vq_nentries
]),
171 VIRTIO_PCI_VRING_ALIGN
);
173 dev
->vrings
[queue_idx
].num
= vq
->vq_nentries
;
174 dev
->vrings
[queue_idx
].desc
= (void *)(uintptr_t)desc_addr
;
175 dev
->vrings
[queue_idx
].avail
= (void *)(uintptr_t)avail_addr
;
176 dev
->vrings
[queue_idx
].used
= (void *)(uintptr_t)used_addr
;
182 virtio_user_del_queue(struct virtio_hw
*hw
, struct virtqueue
*vq
)
184 /* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
185 * correspondingly stops the ioeventfds, and reset the status of
187 * For modern devices, set queue desc, avail, used in PCI bar to 0,
188 * not see any more behavior in QEMU.
190 * Here we just care about what information to deliver to vhost-user
191 * or vhost-kernel. So we just close ioeventfd for now.
193 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
195 close(dev
->callfds
[vq
->vq_queue_index
]);
196 close(dev
->kickfds
[vq
->vq_queue_index
]);
200 virtio_user_notify_queue(struct virtio_hw
*hw
, struct virtqueue
*vq
)
203 struct virtio_user_dev
*dev
= virtio_user_get_dev(hw
);
205 if (hw
->cvq
&& (hw
->cvq
->vq
== vq
)) {
206 virtio_user_handle_cq(dev
, vq
->vq_queue_index
);
210 if (write(dev
->kickfds
[vq
->vq_queue_index
], &buf
, sizeof(buf
)) < 0)
211 PMD_DRV_LOG(ERR
, "failed to kick backend: %s\n",
215 static const struct virtio_pci_ops virtio_user_ops
= {
216 .read_dev_cfg
= virtio_user_read_dev_config
,
217 .write_dev_cfg
= virtio_user_write_dev_config
,
218 .reset
= virtio_user_reset
,
219 .get_status
= virtio_user_get_status
,
220 .set_status
= virtio_user_set_status
,
221 .get_features
= virtio_user_get_features
,
222 .set_features
= virtio_user_set_features
,
223 .get_isr
= virtio_user_get_isr
,
224 .set_config_irq
= virtio_user_set_config_irq
,
225 .get_queue_num
= virtio_user_get_queue_num
,
226 .setup_queue
= virtio_user_setup_queue
,
227 .del_queue
= virtio_user_del_queue
,
228 .notify_queue
= virtio_user_notify_queue
,
/* Device arguments accepted on the --vdev command line; the list must be
 * NULL-terminated for rte_kvargs_parse().
 */
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
	NULL
};

/* Defaults used when an argument is not supplied. */
#define VIRTIO_USER_DEF_CQ_EN	0
#define VIRTIO_USER_DEF_Q_NUM	1
#define VIRTIO_USER_DEF_Q_SZ	256
250 get_string_arg(const char *key __rte_unused
,
251 const char *value
, void *extra_args
)
253 if (!value
|| !extra_args
)
256 *(char **)extra_args
= strdup(value
);
/* rte_kvargs_process() callback: parse the value as an unsigned integer
 * (base auto-detected, so "0x..." works) into *(uint64_t *)extra_args.
 * Rejects empty, non-numeric and out-of-range input instead of silently
 * storing 0. Returns 0 on success, -EINVAL on bad input.
 */
static int
get_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	char *end = NULL;

	if (!value || !extra_args)
		return -EINVAL;

	errno = 0;
	*(uint64_t *)extra_args = strtoull(value, &end, 0);
	if (errno != 0 || end == value || *end != '\0')
		return -EINVAL;

	return 0;
}
273 static struct rte_eth_dev
*
274 virtio_user_eth_dev_alloc(const char *name
)
276 struct rte_eth_dev
*eth_dev
;
277 struct rte_eth_dev_data
*data
;
278 struct virtio_hw
*hw
;
279 struct virtio_user_dev
*dev
;
281 eth_dev
= rte_eth_dev_allocate(name
);
283 PMD_INIT_LOG(ERR
, "cannot alloc rte_eth_dev");
287 data
= eth_dev
->data
;
289 hw
= rte_zmalloc(NULL
, sizeof(*hw
), 0);
291 PMD_INIT_LOG(ERR
, "malloc virtio_hw failed");
292 rte_eth_dev_release_port(eth_dev
);
296 dev
= rte_zmalloc(NULL
, sizeof(*dev
), 0);
298 PMD_INIT_LOG(ERR
, "malloc virtio_user_dev failed");
299 rte_eth_dev_release_port(eth_dev
);
304 hw
->vtpci_ops
= &virtio_user_ops
;
307 hw
->use_simple_rxtx
= 0;
308 hw
->virtio_user_dev
= dev
;
309 data
->dev_private
= hw
;
310 data
->numa_node
= SOCKET_ID_ANY
;
311 data
->kdrv
= RTE_KDRV_NONE
;
312 data
->dev_flags
= RTE_ETH_DEV_DETACHABLE
;
313 eth_dev
->pci_dev
= NULL
;
314 eth_dev
->driver
= NULL
;
319 virtio_user_eth_dev_free(struct rte_eth_dev
*eth_dev
)
321 struct rte_eth_dev_data
*data
= eth_dev
->data
;
322 struct virtio_hw
*hw
= data
->dev_private
;
324 rte_free(hw
->virtio_user_dev
);
326 rte_eth_dev_release_port(eth_dev
);
329 /* Dev initialization routine. Invoked once for each virtio vdev at
330 * EAL init time, see rte_eal_dev_init().
331 * Returns 0 on success.
334 virtio_user_pmd_probe(const char *name
, const char *params
)
336 struct rte_kvargs
*kvlist
= NULL
;
337 struct rte_eth_dev
*eth_dev
;
338 struct virtio_hw
*hw
;
339 uint64_t queues
= VIRTIO_USER_DEF_Q_NUM
;
340 uint64_t cq
= VIRTIO_USER_DEF_CQ_EN
;
341 uint64_t queue_size
= VIRTIO_USER_DEF_Q_SZ
;
343 char *mac_addr
= NULL
;
346 if (!params
|| params
[0] == '\0') {
347 PMD_INIT_LOG(ERR
, "arg %s is mandatory for virtio_user",
348 VIRTIO_USER_ARG_QUEUE_SIZE
);
352 kvlist
= rte_kvargs_parse(params
, valid_args
);
354 PMD_INIT_LOG(ERR
, "error when parsing param");
358 if (rte_kvargs_count(kvlist
, VIRTIO_USER_ARG_PATH
) == 1) {
359 if (rte_kvargs_process(kvlist
, VIRTIO_USER_ARG_PATH
,
360 &get_string_arg
, &path
) < 0) {
361 PMD_INIT_LOG(ERR
, "error to parse %s",
362 VIRTIO_USER_ARG_PATH
);
366 PMD_INIT_LOG(ERR
, "arg %s is mandatory for virtio_user\n",
367 VIRTIO_USER_ARG_QUEUE_SIZE
);
371 if (rte_kvargs_count(kvlist
, VIRTIO_USER_ARG_MAC
) == 1) {
372 if (rte_kvargs_process(kvlist
, VIRTIO_USER_ARG_MAC
,
373 &get_string_arg
, &mac_addr
) < 0) {
374 PMD_INIT_LOG(ERR
, "error to parse %s",
375 VIRTIO_USER_ARG_MAC
);
380 if (rte_kvargs_count(kvlist
, VIRTIO_USER_ARG_QUEUE_SIZE
) == 1) {
381 if (rte_kvargs_process(kvlist
, VIRTIO_USER_ARG_QUEUE_SIZE
,
382 &get_integer_arg
, &queue_size
) < 0) {
383 PMD_INIT_LOG(ERR
, "error to parse %s",
384 VIRTIO_USER_ARG_QUEUE_SIZE
);
389 if (rte_kvargs_count(kvlist
, VIRTIO_USER_ARG_QUEUES_NUM
) == 1) {
390 if (rte_kvargs_process(kvlist
, VIRTIO_USER_ARG_QUEUES_NUM
,
391 &get_integer_arg
, &queues
) < 0) {
392 PMD_INIT_LOG(ERR
, "error to parse %s",
393 VIRTIO_USER_ARG_QUEUES_NUM
);
398 if (rte_kvargs_count(kvlist
, VIRTIO_USER_ARG_CQ_NUM
) == 1) {
399 if (rte_kvargs_process(kvlist
, VIRTIO_USER_ARG_CQ_NUM
,
400 &get_integer_arg
, &cq
) < 0) {
401 PMD_INIT_LOG(ERR
, "error to parse %s",
402 VIRTIO_USER_ARG_CQ_NUM
);
405 } else if (queues
> 1) {
409 if (queues
> 1 && cq
== 0) {
410 PMD_INIT_LOG(ERR
, "multi-q requires ctrl-q");
414 eth_dev
= virtio_user_eth_dev_alloc(name
);
416 PMD_INIT_LOG(ERR
, "virtio_user fails to alloc device");
420 hw
= eth_dev
->data
->dev_private
;
421 if (virtio_user_dev_init(hw
->virtio_user_dev
, path
, queues
, cq
,
422 queue_size
, mac_addr
) < 0) {
423 PMD_INIT_LOG(ERR
, "virtio_user_dev_init fails");
424 virtio_user_eth_dev_free(eth_dev
);
428 /* previously called by rte_eal_pci_probe() for physical dev */
429 if (eth_virtio_dev_init(eth_dev
) < 0) {
430 PMD_INIT_LOG(ERR
, "eth_virtio_dev_init fails");
431 virtio_user_eth_dev_free(eth_dev
);
438 rte_kvargs_free(kvlist
);
446 /** Called by rte_eth_dev_detach() */
448 virtio_user_pmd_remove(const char *name
)
450 struct rte_eth_dev
*eth_dev
;
451 struct virtio_hw
*hw
;
452 struct virtio_user_dev
*dev
;
457 PMD_DRV_LOG(INFO
, "Un-Initializing %s\n", name
);
458 eth_dev
= rte_eth_dev_allocated(name
);
462 /* make sure the device is stopped, queues freed */
463 rte_eth_dev_close(eth_dev
->data
->port_id
);
465 hw
= eth_dev
->data
->dev_private
;
466 dev
= hw
->virtio_user_dev
;
467 virtio_user_dev_uninit(dev
);
469 rte_free(eth_dev
->data
->dev_private
);
470 rte_free(eth_dev
->data
);
471 rte_eth_dev_release_port(eth_dev
);
476 static struct rte_vdev_driver virtio_user_driver
= {
477 .probe
= virtio_user_pmd_probe
,
478 .remove
= virtio_user_pmd_remove
,
RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
483 RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user
,