/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"
#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
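/*
 * Illustrative example (not from the original source): given the layout of
 * struct virtio_net_config (mac[6], then a 16-bit status, then a 16-bit
 * max_virtqueue_pairs), endof(struct virtio_net_config, status) evaluates
 * to offsetof(..., status) + sizeof(status) = 6 + 2 = 8, i.e. the config
 * space size needed to expose every field up to and including 'status'.
 */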
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
static int vq2q(int queue_index)
{
    return queue_index / 2;
}
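/*
 * Virtqueues are laid out as RX/TX pairs: vq 0 is RX for queue pair 0,
 * vq 1 is TX for pair 0, vqs 2/3 serve pair 1, and so on, with the control
 * vq always last.  vq2q() maps a virtqueue index back to its queue-pair
 * index, e.g. vq2q(4) == vq2q(5) == 2.
 */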
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}
static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}
static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}
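/*
 * Dropping every pending TX buffer and completing it with a notify lets a
 * guest that keeps queueing packets while the link is down make forward
 * progress instead of stalling on a full ring.
 */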
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* if tx is waiting we likely have some packets in tx queue
                 * and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}
static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
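/*
 * The VLAN filter is a MAX_VLAN-bit bitmap stored as 32-bit words: vid 100
 * lives in n->vlans[100 >> 5] at bit (100 & 0x1f), i.e. word 3, bit 4.
 * get_vlan_table() walks that bitmap and rebuilds the list of configured
 * VLAN ids for QMP rx-filter queries.
 */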
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;   /* for compatibility */
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}
static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
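/*
 * Header sizing rule of thumb: a VERSION_1 (modern) device always uses the
 * 12-byte virtio_net_hdr_mrg_rxbuf layout, while a legacy device uses it
 * only when VIRTIO_NET_F_MRG_RXBUF was negotiated and the bare 10-byte
 * virtio_net_hdr otherwise.  host_hdr_len only follows guest_hdr_len when
 * the peer backend can actually be switched to that header length.
 */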
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}
static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}
static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}
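/*
 * The guest offload bits deliberately reuse the feature-bit positions
 * (VIRTIO_NET_F_GUEST_*), so a negotiated feature word can be masked
 * directly into an offload word: e.g. features with GUEST_CSUM and
 * GUEST_TSO4 set yield exactly those two offload bits.
 */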
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
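/*
 * Wire format of VIRTIO_NET_CTRL_MAC_TABLE_SET, as parsed above: two
 * back-to-back virtio_net_ctrl_mac tables, each a 32-bit entry count
 * followed by that many 6-byte MACs -- the unicast table first, the
 * multicast table second.  A table that exceeds MAC_TABLE_ENTRIES only
 * sets the matching overflow flag; the receive filter then falls back to
 * accepting that whole traffic class.
 */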
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}
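/*
 * Each control-queue element is expected to carry the command in its out
 * sg list (a virtio_net_ctrl_hdr followed by command-specific data) and at
 * least one byte of in space for the VIRTIO_NET_OK/VIRTIO_NET_ERR ack that
 * is written back before the element is pushed.
 */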
static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
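/*
 * With mergeable RX buffers a packet may span several ring elements: the
 * loop above fills them one by one and only afterwards stores the final
 * element count into num_buffers, through the iovec snapshot (mhdr_sg)
 * taken of the first element's header area before it was filled.
 */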
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
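/*
 * flush_tx return convention, relied on by the timer/bh handlers below:
 * a negative errno (-EBUSY when the backend queued a packet
 * asynchronously, -EINVAL when the device was marked broken) or the
 * number of packets sent, capped at n->tx_burst per invocation.
 */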
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
*n
, int index
)
1464 VirtIODevice
*vdev
= VIRTIO_DEVICE(n
);
1466 n
->vqs
[index
].rx_vq
= virtio_add_queue(vdev
, n
->net_conf
.rx_queue_size
,
1467 virtio_net_handle_rx
);
1468 if (n
->net_conf
.tx
&& !strcmp(n
->net_conf
.tx
, "timer")) {
1469 n
->vqs
[index
].tx_vq
=
1470 virtio_add_queue(vdev
, 256, virtio_net_handle_tx_timer
);
1471 n
->vqs
[index
].tx_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
1472 virtio_net_tx_timer
,
1475 n
->vqs
[index
].tx_vq
=
1476 virtio_add_queue(vdev
, 256, virtio_net_handle_tx_bh
);
1477 n
->vqs
[index
].tx_bh
= qemu_bh_new(virtio_net_tx_bh
, &n
->vqs
[index
]);
1480 n
->vqs
[index
].tx_waiting
= 0;
1481 n
->vqs
[index
].n
= n
;
static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
    } else {
        qemu_bh_delete(q->tx_bh);
    }
    virtio_del_queue(vdev, index * 2 + 1);
}
static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}
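/*
 * Example: growing from 1 to 2 queue pairs turns the vq layout
 * [rx0 tx0 ctrl] into [rx0 tx0 rx1 tx1 ctrl] -- the ctrl vq is deleted
 * first and re-added last so that it always keeps the highest index.
 */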
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}
*vdev
, QEMUFile
*f
,
1585 VirtIONet
*n
= VIRTIO_NET(vdev
);
1588 qemu_get_buffer(f
, n
->mac
, ETH_ALEN
);
1589 n
->vqs
[0].tx_waiting
= qemu_get_be32(f
);
1591 virtio_net_set_mrg_rx_bufs(n
, qemu_get_be32(f
),
1592 virtio_vdev_has_feature(vdev
,
1593 VIRTIO_F_VERSION_1
));
1595 n
->status
= qemu_get_be16(f
);
1597 n
->promisc
= qemu_get_byte(f
);
1598 n
->allmulti
= qemu_get_byte(f
);
1600 n
->mac_table
.in_use
= qemu_get_be32(f
);
1601 /* MAC_TABLE_ENTRIES may be different from the saved image */
1602 if (n
->mac_table
.in_use
<= MAC_TABLE_ENTRIES
) {
1603 qemu_get_buffer(f
, n
->mac_table
.macs
,
1604 n
->mac_table
.in_use
* ETH_ALEN
);
1608 /* Overflow detected - can happen if source has a larger MAC table.
1609 * We simply set overflow flag so there's no need to maintain the
1610 * table of addresses, discard them all.
1611 * Note: 64 bit math to avoid integer overflow.
1613 for (i
= 0; i
< (int64_t)n
->mac_table
.in_use
* ETH_ALEN
; ++i
) {
1616 n
->mac_table
.multi_overflow
= n
->mac_table
.uni_overflow
= 1;
1617 n
->mac_table
.in_use
= 0;
1620 qemu_get_buffer(f
, (uint8_t *)n
->vlans
, MAX_VLAN
>> 3);
1622 if (qemu_get_be32(f
) && !peer_has_vnet_hdr(n
)) {
1623 error_report("virtio-net: saved image requires vnet_hdr=on");
1627 n
->mac_table
.multi_overflow
= qemu_get_byte(f
);
1628 n
->mac_table
.uni_overflow
= qemu_get_byte(f
);
1630 n
->alluni
= qemu_get_byte(f
);
1631 n
->nomulti
= qemu_get_byte(f
);
1632 n
->nouni
= qemu_get_byte(f
);
1633 n
->nobcast
= qemu_get_byte(f
);
1635 if (qemu_get_byte(f
) && !peer_has_ufo(n
)) {
1636 error_report("virtio-net: saved image requires TUN_F_UFO support");
1640 if (n
->max_queues
> 1) {
1641 if (n
->max_queues
!= qemu_get_be16(f
)) {
1642 error_report("virtio-net: different max_queues ");
1646 n
->curr_queues
= qemu_get_be16(f
);
1647 if (n
->curr_queues
> n
->max_queues
) {
1648 error_report("virtio-net: curr_queues %x > max_queues %x",
1649 n
->curr_queues
, n
->max_queues
);
1652 for (i
= 1; i
< n
->curr_queues
; i
++) {
1653 n
->vqs
[i
].tx_waiting
= qemu_get_be32(f
);
1657 if (virtio_vdev_has_feature(vdev
, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
)) {
1658 n
->curr_guest_offloads
= qemu_get_be64(f
);
1660 n
->curr_guest_offloads
= virtio_net_supported_guest_offloads(n
);
1663 if (peer_has_vnet_hdr(n
)) {
1664 virtio_net_apply_guest_offloads(n
);
1667 virtio_net_set_queues(n
);
1669 /* Find the first multicast entry in the saved MAC filter */
1670 for (i
= 0; i
< n
->mac_table
.in_use
; i
++) {
1671 if (n
->mac_table
.macs
[i
* ETH_ALEN
] & 1) {
1675 n
->mac_table
.first_multi
= i
;
1677 /* nc.link_down can't be migrated, so infer link_down according
1678 * to link status bit in n->status */
1679 link_down
= (n
->status
& VIRTIO_NET_S_LINK_UP
) == 0;
1680 for (i
= 0; i
< n
->max_queues
; i
++) {
1681 qemu_get_subqueue(n
->nic
, i
)->link_down
= link_down
;
1684 if (virtio_vdev_has_feature(vdev
, VIRTIO_NET_F_GUEST_ANNOUNCE
) &&
1685 virtio_vdev_has_feature(vdev
, VIRTIO_NET_F_CTRL_VQ
)) {
1686 n
->announce_counter
= SELF_ANNOUNCE_ROUNDS
;
1687 timer_mod(n
->announce_timer
, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
));
static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};
static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
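/*
 * Worked example: with only VIRTIO_NET_F_MAC and VIRTIO_NET_F_STATUS among
 * the size-affecting features, config_size becomes
 * endof(struct virtio_net_config, status); enabling VIRTIO_NET_F_MQ grows
 * it to also cover max_virtqueue_pairs.
 */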
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}
)
1886 VirtIONet
*n
= opaque
;
1888 /* At this point, backend must be stopped, otherwise
1889 * it might keep writing to memory. */
1890 assert(!n
->vhost_started
);
static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};
static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};
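/*
 * Typical usage (illustrative command line, not part of this file): the
 * properties above map to -device options, e.g.
 *   -device virtio-net-pci,netdev=nd0,mq=on,rx_queue_size=512,tx=bh
 * Note that "mq" defaults to off, so multiqueue must be requested
 * explicitly.
 */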
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
}
static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)