/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"
#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;
static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1 << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {}
};
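/* virtio_net_set_config_size() below scans this table and uses the largest
 * 'end' offset whose feature bit is set in the host features, so the
 * guest-visible config space only covers fields that can be negotiated. */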
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
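/* Virtqueues come in rx/tx pairs (rx0, tx0, rx1, tx1, ...) with the control
 * queue added last; see virtio_net_add_queue() and
 * virtio_net_change_num_queues().  Dividing a virtqueue index by two
 * therefore yields the queue pair it belongs to. */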
static int vq2q(int queue_index)
{
    return queue_index / 2;
}
/*
 * - we could suppress RX interrupt if we were so inclined.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}
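/* Note: the multi-byte config fields above are stored with virtio_stw_p() so
 * they appear in the byte order the driver expects (guest-endian for legacy
 * devices, little-endian for virtio 1.0). */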
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
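/* Raising VIRTIO_NET_S_ANNOUNCE asks the guest to send gratuitous packets;
 * the guest acknowledges with VIRTIO_NET_CTRL_ANNOUNCE_ACK (see
 * virtio_net_handle_announce()), which re-arms this timer while
 * announce_counter is still non-zero. */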
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);
                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}
static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}
static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
        if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
            (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
            /* if tx is waiting we likely have some packets in the tx queue
             * and disabled notification */
            q->tx_waiting = 0;
            virtio_queue_set_notification(q->tx_vq, 1);
            virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
        }
    }
}
static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
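/* n->vlans is a MAX_VLAN-bit bitmap kept in 32-bit words: bit (vid & 0x1f)
 * of word (vid >> 5) is set when VLAN 'vid' is allowed; see
 * virtio_net_handle_vlan_table() and receive_filter(). */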
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}
static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}
static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
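/* Note: VIRTIO_NET_CTRL_MAC_TABLE_SET carries two virtio_net_ctrl_mac tables
 * back to back, unicast entries first and multicast entries second.  If
 * either table exceeds MAC_TABLE_ENTRIES the corresponding overflow flag is
 * set and receive_filter() then accepts all packets of that class. */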
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}
static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
*hdr
,
1048 uint8_t *buf
, size_t size
)
1050 if ((hdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) && /* missing csum */
1051 (size
> 27 && size
< 1500) && /* normal sized MTU */
1052 (buf
[12] == 0x08 && buf
[13] == 0x00) && /* ethertype == IPv4 */
1053 (buf
[23] == 17) && /* ip.protocol == UDP */
1054 (buf
[34] == 0 && buf
[35] == 67)) { /* udp.srcport == bootps */
1055 net_checksum_calculate(buf
, size
);
1056 hdr
->flags
&= ~VIRTIO_NET_HDR_F_NEEDS_CSUM
;
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
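/* The MAC filter table keeps unicast entries in macs[0 .. first_multi) and
 * multicast entries in macs[first_multi .. in_use); receive_filter() below
 * and virtio_net_query_rxfilter() rely on this split. */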
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
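/* Async TX path: when qemu_sendv_packet_async() cannot complete a send
 * immediately, virtio_net_flush_tx() parks the element in q->async_tx.elem
 * and disables TX notifications; the tx_complete callback above finishes
 * that element and resumes flushing. */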
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
*vdev
, VirtQueue
*vq
)
1352 VirtIONet
*n
= VIRTIO_NET(vdev
);
1353 VirtIONetQueue
*q
= &n
->vqs
[vq2q(virtio_get_queue_index(vq
))];
1355 if (unlikely((n
->status
& VIRTIO_NET_S_LINK_UP
) == 0)) {
1356 virtio_net_drop_tx_queue_data(vdev
, vq
);
1360 /* This happens when device was stopped but VCPU wasn't. */
1361 if (!vdev
->vm_running
) {
1366 if (q
->tx_waiting
) {
1367 virtio_queue_set_notification(vq
, 1);
1368 timer_del(q
->tx_timer
);
1370 if (virtio_net_flush_tx(q
) == -EINVAL
) {
1374 timer_mod(q
->tx_timer
,
1375 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + n
->tx_timeout
);
1377 virtio_queue_set_notification(vq
, 0);
1381 static void virtio_net_handle_tx_bh(VirtIODevice
*vdev
, VirtQueue
*vq
)
1383 VirtIONet
*n
= VIRTIO_NET(vdev
);
1384 VirtIONetQueue
*q
= &n
->vqs
[vq2q(virtio_get_queue_index(vq
))];
1386 if (unlikely((n
->status
& VIRTIO_NET_S_LINK_UP
) == 0)) {
1387 virtio_net_drop_tx_queue_data(vdev
, vq
);
1391 if (unlikely(q
->tx_waiting
)) {
1395 /* This happens when device was stopped but VCPU wasn't. */
1396 if (!vdev
->vm_running
) {
1399 virtio_queue_set_notification(vq
, 0);
1400 qemu_bh_schedule(q
->tx_bh
);
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);
    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}
static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
        q->tx_timer = NULL;
    } else {
        qemu_bh_delete(q->tx_bh);
        q->tx_bh = NULL;
    }
    virtio_del_queue(vdev, index * 2 + 1);
}
*n
, int new_max_queues
)
1517 VirtIODevice
*vdev
= VIRTIO_DEVICE(n
);
1518 int old_num_queues
= virtio_get_num_queues(vdev
);
1519 int new_num_queues
= new_max_queues
* 2 + 1;
1522 assert(old_num_queues
>= 3);
1523 assert(old_num_queues
% 2 == 1);
1525 if (old_num_queues
== new_num_queues
) {
1530 * We always need to remove and add ctrl vq if
1531 * old_num_queues != new_num_queues. Remove ctrl_vq first,
1532 * and then we only enter one of the following too loops.
1534 virtio_del_queue(vdev
, old_num_queues
- 1);
1536 for (i
= new_num_queues
- 1; i
< old_num_queues
- 1; i
+= 2) {
1537 /* new_num_queues < old_num_queues */
1538 virtio_net_del_queue(n
, i
/ 2);
1541 for (i
= old_num_queues
- 1; i
< new_num_queues
- 1; i
+= 2) {
1542 /* new_num_queues > old_num_queues */
1543 virtio_net_add_queue(n
, i
/ 2);
1546 /* add ctrl_vq last */
1547 n
->ctrl_vq
= virtio_add_queue(vdev
, 64, virtio_net_handle_ctrl
);
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}
static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}
/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name   = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
   },
};
static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}
static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}
static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}
static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}
/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;
    VirtIONetQueue *vqs_1;
    uint16_t        curr_queues_1;
    uint8_t         has_ufo;
    uint32_t        has_vnet_hdr;
};
/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */

static void virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }
}
static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
            tmp->parent->curr_queues, tmp->parent->max_queues);
        return -EINVAL;
    }

    return 0; /* all good */
}
static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                     curr_queues_1,
                                     vmstate_virtio_net_queue_tx_waiting,
                                     struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};
/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}
static void virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;
}
= {
1720 .name
= "virtio-net-ufo",
1721 .post_load
= virtio_net_ufo_post_load
,
1722 .pre_save
= virtio_net_ufo_pre_save
,
1723 .fields
= (VMStateField
[]) {
1724 VMSTATE_UINT8(has_ufo
, struct VirtIONetMigTmp
),
1725 VMSTATE_END_OF_LIST()
/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}
static void virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
}
static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table; post-load
         * sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                 0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
   },
};
= {
1814 .type
= NET_CLIENT_DRIVER_NIC
,
1815 .size
= sizeof(NICState
),
1816 .can_receive
= virtio_net_can_receive
,
1817 .receive
= virtio_net_receive
,
1818 .link_status_changed
= virtio_net_set_link_status
,
1819 .query_rx_filter
= virtio_net_query_rxfilter
,
1822 static bool virtio_net_guest_notifier_pending(VirtIODevice
*vdev
, int idx
)
1824 VirtIONet
*n
= VIRTIO_NET(vdev
);
1825 NetClientState
*nc
= qemu_get_subqueue(n
->nic
, vq2q(idx
));
1826 assert(n
->vhost_started
);
1827 return vhost_net_virtqueue_pending(get_vhost_net(nc
->peer
), idx
);
1830 static void virtio_net_guest_notifier_mask(VirtIODevice
*vdev
, int idx
,
1833 VirtIONet
*n
= VIRTIO_NET(vdev
);
1834 NetClientState
*nc
= qemu_get_subqueue(n
->nic
, vq2q(idx
));
1835 assert(n
->vhost_started
);
1836 vhost_net_virtqueue_mask(get_vhost_net(nc
->peer
),
static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happen when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}
static void virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
}
static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};
static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}
= {
2104 .name
= TYPE_VIRTIO_NET
,
2105 .parent
= TYPE_VIRTIO_DEVICE
,
2106 .instance_size
= sizeof(VirtIONet
),
2107 .instance_init
= virtio_net_instance_init
,
2108 .class_init
= virtio_net_class_init
,
static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)