1 /*
2 * Virtio Network Device
3 *
4 * Copyright IBM, Corp. 2007
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/iov.h"
16 #include "hw/virtio/virtio.h"
17 #include "net/net.h"
18 #include "net/checksum.h"
19 #include "net/tap.h"
20 #include "qemu/error-report.h"
21 #include "qemu/timer.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "net/vhost_net.h"
24 #include "hw/virtio/virtio-bus.h"
25 #include "qapi/error.h"
26 #include "qapi/qmp/qjson.h"
27 #include "qapi-event.h"
28 #include "hw/virtio/virtio-access.h"
29 #include "migration/misc.h"
30
31 #define VIRTIO_NET_VM_VERSION 11
32
33 #define MAC_TABLE_ENTRIES 64
34 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
35
36 /* previously fixed value */
37 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
38 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
39
40 /* for now, only allow larger queues; with virtio-1, guest can downsize */
41 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
42 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
43
44 /*
45 * Calculate the number of bytes up to and including the given 'field' of
46 * 'container'.
47 */
48 #define endof(container, field) \
49 (offsetof(container, field) + sizeof(((container *)0)->field))
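/*
 * For example, endof(struct virtio_net_config, status) is
 * offsetof(struct virtio_net_config, status) + sizeof(uint16_t): with
 * mac[6] first, that is 6 + 2 = 8 bytes, i.e. how much of the config
 * space a driver that negotiated up to VIRTIO_NET_F_STATUS can see.
 */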
50
51 typedef struct VirtIOFeature {
52 uint32_t flags;
53 size_t end;
54 } VirtIOFeature;
55
56 static VirtIOFeature feature_sizes[] = {
57 {.flags = 1 << VIRTIO_NET_F_MAC,
58 .end = endof(struct virtio_net_config, mac)},
59 {.flags = 1 << VIRTIO_NET_F_STATUS,
60 .end = endof(struct virtio_net_config, status)},
61 {.flags = 1 << VIRTIO_NET_F_MQ,
62 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
63 {.flags = 1 << VIRTIO_NET_F_MTU,
64 .end = endof(struct virtio_net_config, mtu)},
65 {}
66 };
67
68 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
69 {
70 VirtIONet *n = qemu_get_nic_opaque(nc);
71
72 return &n->vqs[nc->queue_index];
73 }
74
75 static int vq2q(int queue_index)
76 {
77 return queue_index / 2;
78 }
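/*
 * Virtqueues are laid out in rx/tx pairs: index 0 is rx for queue pair
 * 0, index 1 is tx for pair 0, index 2 is rx for pair 1, and so on,
 * with the control virtqueue last.  vq2q() recovers the queue-pair
 * index, e.g. vq2q(5) == 2 (the tx ring of the third pair).
 */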
79
80 /* TODO
81 * - we could suppress RX interrupt if we were so inclined.
82 */
83
84 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
85 {
86 VirtIONet *n = VIRTIO_NET(vdev);
87 struct virtio_net_config netcfg;
88
89 virtio_stw_p(vdev, &netcfg.status, n->status);
90 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
91 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
92 memcpy(netcfg.mac, n->mac, ETH_ALEN);
93 memcpy(config, &netcfg, n->config_size);
94 }
95
96 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
97 {
98 VirtIONet *n = VIRTIO_NET(vdev);
99 struct virtio_net_config netcfg = {};
100
101 memcpy(&netcfg, config, n->config_size);
102
103 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
104 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
105 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
106 memcpy(n->mac, netcfg.mac, ETH_ALEN);
107 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
108 }
109 }
110
111 static bool virtio_net_started(VirtIONet *n, uint8_t status)
112 {
113 VirtIODevice *vdev = VIRTIO_DEVICE(n);
114 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
115 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
116 }
117
118 static void virtio_net_announce_timer(void *opaque)
119 {
120 VirtIONet *n = opaque;
121 VirtIODevice *vdev = VIRTIO_DEVICE(n);
122
123 n->announce_counter--;
124 n->status |= VIRTIO_NET_S_ANNOUNCE;
125 virtio_notify_config(vdev);
126 }
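/*
 * Self-announce after migration: this timer raises VIRTIO_NET_S_ANNOUNCE
 * and pokes the config interrupt; the guest reacts by sending gratuitous
 * ARPs and acking with VIRTIO_NET_CTRL_ANNOUNCE_ACK (see
 * virtio_net_handle_announce()), which re-arms the timer until
 * announce_counter runs out.
 */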
127
128 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
129 {
130 VirtIODevice *vdev = VIRTIO_DEVICE(n);
131 NetClientState *nc = qemu_get_queue(n->nic);
132 int queues = n->multiqueue ? n->max_queues : 1;
133
134 if (!get_vhost_net(nc->peer)) {
135 return;
136 }
137
138 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
139 !!n->vhost_started) {
140 return;
141 }
142 if (!n->vhost_started) {
143 int r, i;
144
145 if (n->needs_vnet_hdr_swap) {
146 error_report("backend does not support %s vnet headers; "
147 "falling back on userspace virtio",
148 virtio_is_big_endian(vdev) ? "BE" : "LE");
149 return;
150 }
151
152 /* Any packets outstanding? Purge them to avoid touching rings
153 * when vhost is running.
154 */
155 for (i = 0; i < queues; i++) {
156 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
157
158 /* Purge both directions: TX and RX. */
159 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
160 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
161 }
162
163 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
164 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
165 if (r < 0) {
166 error_report("%uBytes MTU not supported by the backend",
167 n->net_conf.mtu);
168
169 return;
170 }
171 }
172
173 n->vhost_started = 1;
174 r = vhost_net_start(vdev, n->nic->ncs, queues);
175 if (r < 0) {
176 error_report("unable to start vhost net: %d: "
177 "falling back on userspace virtio", -r);
178 n->vhost_started = 0;
179 }
180 } else {
181 vhost_net_stop(vdev, n->nic->ncs, queues);
182 n->vhost_started = 0;
183 }
184 }
185
186 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
187 NetClientState *peer,
188 bool enable)
189 {
190 if (virtio_is_big_endian(vdev)) {
191 return qemu_set_vnet_be(peer, enable);
192 } else {
193 return qemu_set_vnet_le(peer, enable);
194 }
195 }
196
197 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
198 int queues, bool enable)
199 {
200 int i;
201
202 for (i = 0; i < queues; i++) {
203 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
204 enable) {
205 while (--i >= 0) {
206 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
207 }
208
209 return true;
210 }
211 }
212
213 return false;
214 }
215
216 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
217 {
218 VirtIODevice *vdev = VIRTIO_DEVICE(n);
219 int queues = n->multiqueue ? n->max_queues : 1;
220
221 if (virtio_net_started(n, status)) {
222 /* Before using the device, we tell the network backend about the
223 * endianness to use when parsing vnet headers. If the backend
224 * can't do it, we fall back to fixing the headers in the core
225 * virtio-net code.
226 */
227 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
228 queues, true);
229 } else if (virtio_net_started(n, vdev->status)) {
230 /* After using the device, we need to reset the network backend to
231 * the default (guest native endianness), otherwise the guest may
232 * lose network connectivity if it is rebooted into a different
233 * endianness.
234 */
235 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
236 }
237 }
238
239 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
240 {
241 unsigned int dropped = virtqueue_drop_all(vq);
242 if (dropped) {
243 virtio_notify(vdev, vq);
244 }
245 }
246
247 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
248 {
249 VirtIONet *n = VIRTIO_NET(vdev);
250 VirtIONetQueue *q;
251 int i;
252 uint8_t queue_status;
253
254 virtio_net_vnet_endian_status(n, status);
255 virtio_net_vhost_status(n, status);
256
257 for (i = 0; i < n->max_queues; i++) {
258 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
259 bool queue_started;
260 q = &n->vqs[i];
261
262 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
263 queue_status = 0;
264 } else {
265 queue_status = status;
266 }
267 queue_started =
268 virtio_net_started(n, queue_status) && !n->vhost_started;
269
270 if (queue_started) {
271 qemu_flush_queued_packets(ncs);
272 }
273
274 if (!q->tx_waiting) {
275 continue;
276 }
277
278 if (queue_started) {
279 if (q->tx_timer) {
280 timer_mod(q->tx_timer,
281 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
282 } else {
283 qemu_bh_schedule(q->tx_bh);
284 }
285 } else {
286 if (q->tx_timer) {
287 timer_del(q->tx_timer);
288 } else {
289 qemu_bh_cancel(q->tx_bh);
290 }
291 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
292 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
293 vdev->vm_running) {
294 /* If tx is waiting, we likely have some packets in the tx queue
295 * and notification disabled. */
296 q->tx_waiting = 0;
297 virtio_queue_set_notification(q->tx_vq, 1);
298 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
299 }
300 }
301 }
302 }
303
304 static void virtio_net_set_link_status(NetClientState *nc)
305 {
306 VirtIONet *n = qemu_get_nic_opaque(nc);
307 VirtIODevice *vdev = VIRTIO_DEVICE(n);
308 uint16_t old_status = n->status;
309
310 if (nc->link_down)
311 n->status &= ~VIRTIO_NET_S_LINK_UP;
312 else
313 n->status |= VIRTIO_NET_S_LINK_UP;
314
315 if (n->status != old_status)
316 virtio_notify_config(vdev);
317
318 virtio_net_set_status(vdev, vdev->status);
319 }
320
321 static void rxfilter_notify(NetClientState *nc)
322 {
323 VirtIONet *n = qemu_get_nic_opaque(nc);
324
325 if (nc->rxfilter_notify_enabled) {
326 gchar *path = object_get_canonical_path(OBJECT(n->qdev));
327 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
328 n->netclient_name, path, &error_abort);
329 g_free(path);
330
331 /* disable event notification to avoid event flooding */
332 nc->rxfilter_notify_enabled = 0;
333 }
334 }
335
336 static intList *get_vlan_table(VirtIONet *n)
337 {
338 intList *list, *entry;
339 int i, j;
340
341 list = NULL;
342 for (i = 0; i < MAX_VLAN >> 5; i++) {
343 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
344 if (n->vlans[i] & (1U << j)) {
345 entry = g_malloc0(sizeof(*entry));
346 entry->value = (i << 5) + j;
347 entry->next = list;
348 list = entry;
349 }
350 }
351 }
352
353 return list;
354 }
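/*
 * n->vlans is a 4096-bit bitmap stored as 128 uint32_t words, so word
 * i, bit j corresponds to VLAN id (i << 5) + j.  For example, VLAN 100
 * lives in word 3, bit 4 (100 == 3 * 32 + 4).
 */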
355
356 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
357 {
358 VirtIONet *n = qemu_get_nic_opaque(nc);
359 VirtIODevice *vdev = VIRTIO_DEVICE(n);
360 RxFilterInfo *info;
361 strList *str_list, *entry;
362 int i;
363
364 info = g_malloc0(sizeof(*info));
365 info->name = g_strdup(nc->name);
366 info->promiscuous = n->promisc;
367
368 if (n->nouni) {
369 info->unicast = RX_STATE_NONE;
370 } else if (n->alluni) {
371 info->unicast = RX_STATE_ALL;
372 } else {
373 info->unicast = RX_STATE_NORMAL;
374 }
375
376 if (n->nomulti) {
377 info->multicast = RX_STATE_NONE;
378 } else if (n->allmulti) {
379 info->multicast = RX_STATE_ALL;
380 } else {
381 info->multicast = RX_STATE_NORMAL;
382 }
383
384 info->broadcast_allowed = n->nobcast;
385 info->multicast_overflow = n->mac_table.multi_overflow;
386 info->unicast_overflow = n->mac_table.uni_overflow;
387
388 info->main_mac = qemu_mac_strdup_printf(n->mac);
389
390 str_list = NULL;
391 for (i = 0; i < n->mac_table.first_multi; i++) {
392 entry = g_malloc0(sizeof(*entry));
393 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
394 entry->next = str_list;
395 str_list = entry;
396 }
397 info->unicast_table = str_list;
398
399 str_list = NULL;
400 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
401 entry = g_malloc0(sizeof(*entry));
402 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
403 entry->next = str_list;
404 str_list = entry;
405 }
406 info->multicast_table = str_list;
407 info->vlan_table = get_vlan_table(n);
408
409 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
410 info->vlan = RX_STATE_ALL;
411 } else if (!info->vlan_table) {
412 info->vlan = RX_STATE_NONE;
413 } else {
414 info->vlan = RX_STATE_NORMAL;
415 }
416
417 /* enable event notification after query */
418 nc->rxfilter_notify_enabled = 1;
419
420 return info;
421 }
422
423 static void virtio_net_reset(VirtIODevice *vdev)
424 {
425 VirtIONet *n = VIRTIO_NET(vdev);
426
427 /* Reset back to compatibility mode */
428 n->promisc = 1;
429 n->allmulti = 0;
430 n->alluni = 0;
431 n->nomulti = 0;
432 n->nouni = 0;
433 n->nobcast = 0;
434 /* multiqueue is disabled by default */
435 n->curr_queues = 1;
436 timer_del(n->announce_timer);
437 n->announce_counter = 0;
438 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
439
440 /* Flush any MAC and VLAN filter table state */
441 n->mac_table.in_use = 0;
442 n->mac_table.first_multi = 0;
443 n->mac_table.multi_overflow = 0;
444 n->mac_table.uni_overflow = 0;
445 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
446 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
447 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
448 memset(n->vlans, 0, MAX_VLAN >> 3);
449 }
450
451 static void peer_test_vnet_hdr(VirtIONet *n)
452 {
453 NetClientState *nc = qemu_get_queue(n->nic);
454 if (!nc->peer) {
455 return;
456 }
457
458 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
459 }
460
461 static int peer_has_vnet_hdr(VirtIONet *n)
462 {
463 return n->has_vnet_hdr;
464 }
465
466 static int peer_has_ufo(VirtIONet *n)
467 {
468 if (!peer_has_vnet_hdr(n))
469 return 0;
470
471 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
472
473 return n->has_ufo;
474 }
475
476 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
477 int version_1)
478 {
479 int i;
480 NetClientState *nc;
481
482 n->mergeable_rx_bufs = mergeable_rx_bufs;
483
484 if (version_1) {
485 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
486 } else {
487 n->guest_hdr_len = n->mergeable_rx_bufs ?
488 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
489 sizeof(struct virtio_net_hdr);
490 }
491
492 for (i = 0; i < n->max_queues; i++) {
493 nc = qemu_get_subqueue(n->nic, i);
494
495 if (peer_has_vnet_hdr(n) &&
496 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
497 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
498 n->host_hdr_len = n->guest_hdr_len;
499 }
500 }
501 }
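/*
 * The header the guest sees is either the legacy 10-byte struct
 * virtio_net_hdr or the 12-byte struct virtio_net_hdr_mrg_rxbuf, which
 * appends num_buffers.  Virtio 1.0 always uses the larger layout, even
 * when VIRTIO_NET_F_MRG_RXBUF was not negotiated.
 */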
502
503 static int virtio_net_max_tx_queue_size(VirtIONet *n)
504 {
505 NetClientState *peer = n->nic_conf.peers.ncs[0];
506
507 /*
508 * Backends other than vhost-user are limited to the default TX queue size.
509 */
510 if (!peer) {
511 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
512 }
513
514 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
515 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
516 }
517
518 return VIRTQUEUE_MAX_SIZE;
519 }
520
521 static int peer_attach(VirtIONet *n, int index)
522 {
523 NetClientState *nc = qemu_get_subqueue(n->nic, index);
524
525 if (!nc->peer) {
526 return 0;
527 }
528
529 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
530 vhost_set_vring_enable(nc->peer, 1);
531 }
532
533 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
534 return 0;
535 }
536
537 if (n->max_queues == 1) {
538 return 0;
539 }
540
541 return tap_enable(nc->peer);
542 }
543
544 static int peer_detach(VirtIONet *n, int index)
545 {
546 NetClientState *nc = qemu_get_subqueue(n->nic, index);
547
548 if (!nc->peer) {
549 return 0;
550 }
551
552 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
553 vhost_set_vring_enable(nc->peer, 0);
554 }
555
556 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
557 return 0;
558 }
559
560 return tap_disable(nc->peer);
561 }
562
563 static void virtio_net_set_queues(VirtIONet *n)
564 {
565 int i;
566 int r;
567
568 if (n->nic->peer_deleted) {
569 return;
570 }
571
572 for (i = 0; i < n->max_queues; i++) {
573 if (i < n->curr_queues) {
574 r = peer_attach(n, i);
575 assert(!r);
576 } else {
577 r = peer_detach(n, i);
578 assert(!r);
579 }
580 }
581 }
582
583 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
584
585 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
586 Error **errp)
587 {
588 VirtIONet *n = VIRTIO_NET(vdev);
589 NetClientState *nc = qemu_get_queue(n->nic);
590
591 /* First, sync all features that virtio-net could possibly support */
592 features |= n->host_features;
593
594 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
595
596 if (!peer_has_vnet_hdr(n)) {
597 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
598 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
599 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
600 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
601
602 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
603 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
604 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
605 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
606 }
607
608 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
609 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
610 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
611 }
612
613 if (!get_vhost_net(nc->peer)) {
614 return features;
615 }
616 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
617 vdev->backend_features = features;
618
619 if (n->mtu_bypass_backend &&
620 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
621 features |= (1ULL << VIRTIO_NET_F_MTU);
622 }
623
624 return features;
625 }
626
627 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
628 {
629 uint64_t features = 0;
630
631 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
632 * but also these: */
633 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
634 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
635 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
636 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
637 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
638
639 return features;
640 }
641
642 static void virtio_net_apply_guest_offloads(VirtIONet *n)
643 {
644 qemu_set_offload(qemu_get_queue(n->nic)->peer,
645 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
646 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
647 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
648 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
649 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
650 }
651
652 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
653 {
654 static const uint64_t guest_offloads_mask =
655 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
656 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
657 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
658 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
659 (1ULL << VIRTIO_NET_F_GUEST_UFO);
660
661 return guest_offloads_mask & features;
662 }
663
664 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
665 {
666 VirtIODevice *vdev = VIRTIO_DEVICE(n);
667 return virtio_net_guest_offloads_by_features(vdev->guest_features);
668 }
669
670 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
671 {
672 VirtIONet *n = VIRTIO_NET(vdev);
673 int i;
674
675 if (n->mtu_bypass_backend &&
676 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
677 features &= ~(1ULL << VIRTIO_NET_F_MTU);
678 }
679
680 virtio_net_set_multiqueue(n,
681 virtio_has_feature(features, VIRTIO_NET_F_MQ));
682
683 virtio_net_set_mrg_rx_bufs(n,
684 virtio_has_feature(features,
685 VIRTIO_NET_F_MRG_RXBUF),
686 virtio_has_feature(features,
687 VIRTIO_F_VERSION_1));
688
689 if (n->has_vnet_hdr) {
690 n->curr_guest_offloads =
691 virtio_net_guest_offloads_by_features(features);
692 virtio_net_apply_guest_offloads(n);
693 }
694
695 for (i = 0; i < n->max_queues; i++) {
696 NetClientState *nc = qemu_get_subqueue(n->nic, i);
697
698 if (!get_vhost_net(nc->peer)) {
699 continue;
700 }
701 vhost_net_ack_features(get_vhost_net(nc->peer), features);
702 }
703
704 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
705 memset(n->vlans, 0, MAX_VLAN >> 3);
706 } else {
707 memset(n->vlans, 0xff, MAX_VLAN >> 3);
708 }
709 }
710
711 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
712 struct iovec *iov, unsigned int iov_cnt)
713 {
714 uint8_t on;
715 size_t s;
716 NetClientState *nc = qemu_get_queue(n->nic);
717
718 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
719 if (s != sizeof(on)) {
720 return VIRTIO_NET_ERR;
721 }
722
723 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
724 n->promisc = on;
725 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
726 n->allmulti = on;
727 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
728 n->alluni = on;
729 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
730 n->nomulti = on;
731 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
732 n->nouni = on;
733 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
734 n->nobcast = on;
735 } else {
736 return VIRTIO_NET_ERR;
737 }
738
739 rxfilter_notify(nc);
740
741 return VIRTIO_NET_OK;
742 }
743
744 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
745 struct iovec *iov, unsigned int iov_cnt)
746 {
747 VirtIODevice *vdev = VIRTIO_DEVICE(n);
748 uint64_t offloads;
749 size_t s;
750
751 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
752 return VIRTIO_NET_ERR;
753 }
754
755 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
756 if (s != sizeof(offloads)) {
757 return VIRTIO_NET_ERR;
758 }
759
760 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
761 uint64_t supported_offloads;
762
763 offloads = virtio_ldq_p(vdev, &offloads);
764
765 if (!n->has_vnet_hdr) {
766 return VIRTIO_NET_ERR;
767 }
768
769 supported_offloads = virtio_net_supported_guest_offloads(n);
770 if (offloads & ~supported_offloads) {
771 return VIRTIO_NET_ERR;
772 }
773
774 n->curr_guest_offloads = offloads;
775 virtio_net_apply_guest_offloads(n);
776
777 return VIRTIO_NET_OK;
778 } else {
779 return VIRTIO_NET_ERR;
780 }
781 }
782
783 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
784 struct iovec *iov, unsigned int iov_cnt)
785 {
786 VirtIODevice *vdev = VIRTIO_DEVICE(n);
787 struct virtio_net_ctrl_mac mac_data;
788 size_t s;
789 NetClientState *nc = qemu_get_queue(n->nic);
790
791 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
792 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
793 return VIRTIO_NET_ERR;
794 }
795 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
796 assert(s == sizeof(n->mac));
797 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
798 rxfilter_notify(nc);
799
800 return VIRTIO_NET_OK;
801 }
802
803 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
804 return VIRTIO_NET_ERR;
805 }
806
807 int in_use = 0;
808 int first_multi = 0;
809 uint8_t uni_overflow = 0;
810 uint8_t multi_overflow = 0;
811 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
812
813 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
814 sizeof(mac_data.entries));
815 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
816 if (s != sizeof(mac_data.entries)) {
817 goto error;
818 }
819 iov_discard_front(&iov, &iov_cnt, s);
820
821 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
822 goto error;
823 }
824
825 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
826 s = iov_to_buf(iov, iov_cnt, 0, macs,
827 mac_data.entries * ETH_ALEN);
828 if (s != mac_data.entries * ETH_ALEN) {
829 goto error;
830 }
831 in_use += mac_data.entries;
832 } else {
833 uni_overflow = 1;
834 }
835
836 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
837
838 first_multi = in_use;
839
840 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
841 sizeof(mac_data.entries));
842 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
843 if (s != sizeof(mac_data.entries)) {
844 goto error;
845 }
846
847 iov_discard_front(&iov, &iov_cnt, s);
848
849 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
850 goto error;
851 }
852
853 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
854 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
855 mac_data.entries * ETH_ALEN);
856 if (s != mac_data.entries * ETH_ALEN) {
857 goto error;
858 }
859 in_use += mac_data.entries;
860 } else {
861 multi_overflow = 1;
862 }
863
864 n->mac_table.in_use = in_use;
865 n->mac_table.first_multi = first_multi;
866 n->mac_table.uni_overflow = uni_overflow;
867 n->mac_table.multi_overflow = multi_overflow;
868 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
869 g_free(macs);
870 rxfilter_notify(nc);
871
872 return VIRTIO_NET_OK;
873
874 error:
875 g_free(macs);
876 return VIRTIO_NET_ERR;
877 }
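/*
 * The VIRTIO_NET_CTRL_MAC_TABLE_SET payload is two back-to-back
 * struct virtio_net_ctrl_mac blocks, unicast first, each a 32-bit
 * (device-endian) entry count followed by that many 6-byte MACs:
 *
 *     entries; macs[entries][ETH_ALEN];   (unicast)
 *     entries; macs[entries][ETH_ALEN];   (multicast)
 *
 * Overflowing tables are not an error: the uni/multi_overflow flags
 * simply tell receive_filter() to stop filtering that class.
 */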
878
879 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
880 struct iovec *iov, unsigned int iov_cnt)
881 {
882 VirtIODevice *vdev = VIRTIO_DEVICE(n);
883 uint16_t vid;
884 size_t s;
885 NetClientState *nc = qemu_get_queue(n->nic);
886
887 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
888 vid = virtio_lduw_p(vdev, &vid);
889 if (s != sizeof(vid)) {
890 return VIRTIO_NET_ERR;
891 }
892
893 if (vid >= MAX_VLAN)
894 return VIRTIO_NET_ERR;
895
896 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
897 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
898 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
899 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
900 else
901 return VIRTIO_NET_ERR;
902
903 rxfilter_notify(nc);
904
905 return VIRTIO_NET_OK;
906 }
907
908 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
909 struct iovec *iov, unsigned int iov_cnt)
910 {
911 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
912 n->status & VIRTIO_NET_S_ANNOUNCE) {
913 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
914 if (n->announce_counter) {
915 timer_mod(n->announce_timer,
916 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
917 self_announce_delay(n->announce_counter));
918 }
919 return VIRTIO_NET_OK;
920 } else {
921 return VIRTIO_NET_ERR;
922 }
923 }
924
925 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
926 struct iovec *iov, unsigned int iov_cnt)
927 {
928 VirtIODevice *vdev = VIRTIO_DEVICE(n);
929 struct virtio_net_ctrl_mq mq;
930 size_t s;
931 uint16_t queues;
932
933 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
934 if (s != sizeof(mq)) {
935 return VIRTIO_NET_ERR;
936 }
937
938 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
939 return VIRTIO_NET_ERR;
940 }
941
942 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
943
944 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
945 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
946 queues > n->max_queues ||
947 !n->multiqueue) {
948 return VIRTIO_NET_ERR;
949 }
950
951 n->curr_queues = queues;
952 /* stop the backend before changing the number of queues to avoid handling a
953 * disabled queue */
954 virtio_net_set_status(vdev, vdev->status);
955 virtio_net_set_queues(n);
956
957 return VIRTIO_NET_OK;
958 }
959
960 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
961 {
962 VirtIONet *n = VIRTIO_NET(vdev);
963 struct virtio_net_ctrl_hdr ctrl;
964 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
965 VirtQueueElement *elem;
966 size_t s;
967 struct iovec *iov, *iov2;
968 unsigned int iov_cnt;
969
970 for (;;) {
971 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
972 if (!elem) {
973 break;
974 }
975 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
976 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
977 virtio_error(vdev, "virtio-net ctrl missing headers");
978 virtqueue_detach_element(vq, elem, 0);
979 g_free(elem);
980 break;
981 }
982
983 iov_cnt = elem->out_num;
984 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
985 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
986 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
987 if (s != sizeof(ctrl)) {
988 status = VIRTIO_NET_ERR;
989 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
990 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
991 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
992 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
993 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
994 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
995 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
996 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
997 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
998 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
999 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1000 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
1001 }
1002
1003 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
1004 assert(s == sizeof(status));
1005
1006 virtqueue_push(vq, elem, sizeof(status));
1007 virtio_notify(vdev, vq);
1008 g_free(iov2);
1009 g_free(elem);
1010 }
1011 }
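/*
 * Each control request is one descriptor chain: the device-readable
 * part starts with a struct virtio_net_ctrl_hdr { u8 class; u8 cmd; }
 * followed by the command payload, and the device-writable part is a
 * single virtio_net_ctrl_ack status byte that the loop above fills in.
 */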
1012
1013 /* RX */
1014
1015 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1016 {
1017 VirtIONet *n = VIRTIO_NET(vdev);
1018 int queue_index = vq2q(virtio_get_queue_index(vq));
1019
1020 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1021 }
1022
1023 static int virtio_net_can_receive(NetClientState *nc)
1024 {
1025 VirtIONet *n = qemu_get_nic_opaque(nc);
1026 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1027 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1028
1029 if (!vdev->vm_running) {
1030 return 0;
1031 }
1032
1033 if (nc->queue_index >= n->curr_queues) {
1034 return 0;
1035 }
1036
1037 if (!virtio_queue_ready(q->rx_vq) ||
1038 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1039 return 0;
1040 }
1041
1042 return 1;
1043 }
1044
1045 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1046 {
1047 VirtIONet *n = q->n;
1048 if (virtio_queue_empty(q->rx_vq) ||
1049 (n->mergeable_rx_bufs &&
1050 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1051 virtio_queue_set_notification(q->rx_vq, 1);
1052
1053 /* To avoid a race condition where the guest has made some buffers
1054 * available after the above check but before notification was
1055 * enabled, check for available buffers again.
1056 */
1057 if (virtio_queue_empty(q->rx_vq) ||
1058 (n->mergeable_rx_bufs &&
1059 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1060 return 0;
1061 }
1062 }
1063
1064 virtio_queue_set_notification(q->rx_vq, 0);
1065 return 1;
1066 }
1067
1068 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1069 {
1070 virtio_tswap16s(vdev, &hdr->hdr_len);
1071 virtio_tswap16s(vdev, &hdr->gso_size);
1072 virtio_tswap16s(vdev, &hdr->csum_start);
1073 virtio_tswap16s(vdev, &hdr->csum_offset);
1074 }
1075
1076 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1077 * it never finds out that the packets don't have valid checksums. This
1078 * causes dhclient to get upset. Fedora's carried a patch for ages to
1079 * fix this with Xen but it hasn't appeared in an upstream release of
1080 * dhclient yet.
1081 *
1082 * To avoid breaking existing guests, we catch udp packets and add
1083 * checksums. This is terrible but it's better than hacking the guest
1084 * kernels.
1085 *
1086 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1087 * we should provide a mechanism to disable it to avoid polluting the host
1088 * cache.
1089 */
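/*
 * The offsets below assume an untagged Ethernet frame carrying an IPv4
 * packet with a 20-byte header: bytes 12-13 are the ethertype, byte 23
 * the IP protocol, and bytes 34-35 the UDP source port (67 == bootps,
 * i.e. a DHCP server reply).
 */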
1090 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1091 uint8_t *buf, size_t size)
1092 {
1093 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1094 (size > 27 && size < 1500) && /* normal sized MTU */
1095 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1096 (buf[23] == 17) && /* ip.protocol == UDP */
1097 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1098 net_checksum_calculate(buf, size);
1099 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1100 }
1101 }
1102
1103 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1104 const void *buf, size_t size)
1105 {
1106 if (n->has_vnet_hdr) {
1107 /* FIXME this cast is evil */
1108 void *wbuf = (void *)buf;
1109 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1110 size - n->host_hdr_len);
1111
1112 if (n->needs_vnet_hdr_swap) {
1113 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1114 }
1115 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1116 } else {
1117 struct virtio_net_hdr hdr = {
1118 .flags = 0,
1119 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1120 };
1121 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1122 }
1123 }
1124
1125 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1126 {
1127 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1128 static const uint8_t vlan[] = {0x81, 0x00};
1129 uint8_t *ptr = (uint8_t *)buf;
1130 int i;
1131
1132 if (n->promisc)
1133 return 1;
1134
1135 ptr += n->host_hdr_len;
1136
1137 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1138 int vid = lduw_be_p(ptr + 14) & 0xfff;
1139 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1140 return 0;
1141 }
1142
1143 if (ptr[0] & 1) { // multicast
1144 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1145 return !n->nobcast;
1146 } else if (n->nomulti) {
1147 return 0;
1148 } else if (n->allmulti || n->mac_table.multi_overflow) {
1149 return 1;
1150 }
1151
1152 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1153 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1154 return 1;
1155 }
1156 }
1157 } else { // unicast
1158 if (n->nouni) {
1159 return 0;
1160 } else if (n->alluni || n->mac_table.uni_overflow) {
1161 return 1;
1162 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1163 return 1;
1164 }
1165
1166 for (i = 0; i < n->mac_table.first_multi; i++) {
1167 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1168 return 1;
1169 }
1170 }
1171 }
1172
1173 return 0;
1174 }
1175
1176 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1177 size_t size)
1178 {
1179 VirtIONet *n = qemu_get_nic_opaque(nc);
1180 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1181 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1182 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1183 struct virtio_net_hdr_mrg_rxbuf mhdr;
1184 unsigned mhdr_cnt = 0;
1185 size_t offset, i, guest_offset;
1186
1187 if (!virtio_net_can_receive(nc)) {
1188 return -1;
1189 }
1190
1191 /* hdr_len refers to the header we supply to the guest */
1192 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1193 return 0;
1194 }
1195
1196 if (!receive_filter(n, buf, size))
1197 return size;
1198
1199 offset = i = 0;
1200
1201 while (offset < size) {
1202 VirtQueueElement *elem;
1203 int len, total;
1204 const struct iovec *sg;
1205
1206 total = 0;
1207
1208 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1209 if (!elem) {
1210 if (i) {
1211 virtio_error(vdev, "virtio-net unexpected empty queue: "
1212 "i %zd mergeable %d offset %zd, size %zd, "
1213 "guest hdr len %zd, host hdr len %zd "
1214 "guest features 0x%" PRIx64,
1215 i, n->mergeable_rx_bufs, offset, size,
1216 n->guest_hdr_len, n->host_hdr_len,
1217 vdev->guest_features);
1218 }
1219 return -1;
1220 }
1221
1222 if (elem->in_num < 1) {
1223 virtio_error(vdev,
1224 "virtio-net receive queue contains no in buffers");
1225 virtqueue_detach_element(q->rx_vq, elem, 0);
1226 g_free(elem);
1227 return -1;
1228 }
1229
1230 sg = elem->in_sg;
1231 if (i == 0) {
1232 assert(offset == 0);
1233 if (n->mergeable_rx_bufs) {
1234 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1235 sg, elem->in_num,
1236 offsetof(typeof(mhdr), num_buffers),
1237 sizeof(mhdr.num_buffers));
1238 }
1239
1240 receive_header(n, sg, elem->in_num, buf, size);
1241 offset = n->host_hdr_len;
1242 total += n->guest_hdr_len;
1243 guest_offset = n->guest_hdr_len;
1244 } else {
1245 guest_offset = 0;
1246 }
1247
1248 /* copy in packet. ugh */
1249 len = iov_from_buf(sg, elem->in_num, guest_offset,
1250 buf + offset, size - offset);
1251 total += len;
1252 offset += len;
1253 /* If buffers can't be merged, at this point we
1254 * must have consumed the complete packet.
1255 * Otherwise, drop it. */
1256 if (!n->mergeable_rx_bufs && offset < size) {
1257 virtqueue_unpop(q->rx_vq, elem, total);
1258 g_free(elem);
1259 return size;
1260 }
1261
1262 /* signal other side */
1263 virtqueue_fill(q->rx_vq, elem, total, i++);
1264 g_free(elem);
1265 }
1266
1267 if (mhdr_cnt) {
1268 virtio_stw_p(vdev, &mhdr.num_buffers, i);
1269 iov_from_buf(mhdr_sg, mhdr_cnt,
1270 0,
1271 &mhdr.num_buffers, sizeof mhdr.num_buffers);
1272 }
1273
1274 virtqueue_flush(q->rx_vq, i);
1275 virtio_notify(vdev, q->rx_vq);
1276
1277 return size;
1278 }
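/*
 * With mergeable rx buffers a packet may span several descriptor
 * chains.  The iov_copy() above records where num_buffers lives inside
 * the first chain (mhdr_sg), so once the loop knows how many chains
 * were consumed it can patch the count back in before flushing.
 */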
1279
1280 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1281 size_t size)
1282 {
1283 ssize_t r;
1284
1285 rcu_read_lock();
1286 r = virtio_net_receive_rcu(nc, buf, size);
1287 rcu_read_unlock();
1288 return r;
1289 }
1290
1291 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1292
1293 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1294 {
1295 VirtIONet *n = qemu_get_nic_opaque(nc);
1296 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1297 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1298
1299 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1300 virtio_notify(vdev, q->tx_vq);
1301
1302 g_free(q->async_tx.elem);
1303 q->async_tx.elem = NULL;
1304
1305 virtio_queue_set_notification(q->tx_vq, 1);
1306 virtio_net_flush_tx(q);
1307 }
1308
1309 /* TX */
1310 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
1311 {
1312 VirtIONet *n = q->n;
1313 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1314 VirtQueueElement *elem;
1315 int32_t num_packets = 0;
1316 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
1317 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1318 return num_packets;
1319 }
1320
1321 if (q->async_tx.elem) {
1322 virtio_queue_set_notification(q->tx_vq, 0);
1323 return num_packets;
1324 }
1325
1326 for (;;) {
1327 ssize_t ret;
1328 unsigned int out_num;
1329 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
1330 struct virtio_net_hdr_mrg_rxbuf mhdr;
1331
1332 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
1333 if (!elem) {
1334 break;
1335 }
1336
1337 out_num = elem->out_num;
1338 out_sg = elem->out_sg;
1339 if (out_num < 1) {
1340 virtio_error(vdev, "virtio-net header not in first element");
1341 virtqueue_detach_element(q->tx_vq, elem, 0);
1342 g_free(elem);
1343 return -EINVAL;
1344 }
1345
1346 if (n->has_vnet_hdr) {
1347 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
1348 n->guest_hdr_len) {
1349 virtio_error(vdev, "virtio-net header incorrect");
1350 virtqueue_detach_element(q->tx_vq, elem, 0);
1351 g_free(elem);
1352 return -EINVAL;
1353 }
1354 if (n->needs_vnet_hdr_swap) {
1355 virtio_net_hdr_swap(vdev, (void *) &mhdr);
1356 sg2[0].iov_base = &mhdr;
1357 sg2[0].iov_len = n->guest_hdr_len;
1358 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
1359 out_sg, out_num,
1360 n->guest_hdr_len, -1);
1361 if (out_num == VIRTQUEUE_MAX_SIZE) {
1362 goto drop;
1363 }
1364 out_num += 1;
1365 out_sg = sg2;
1366 }
1367 }
1368 /*
1369 * If host wants to see the guest header as is, we can
1370 * pass it on unchanged. Otherwise, copy just the parts
1371 * that host is interested in.
1372 */
1373 assert(n->host_hdr_len <= n->guest_hdr_len);
1374 if (n->host_hdr_len != n->guest_hdr_len) {
1375 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
1376 out_sg, out_num,
1377 0, n->host_hdr_len);
1378 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
1379 out_sg, out_num,
1380 n->guest_hdr_len, -1);
1381 out_num = sg_num;
1382 out_sg = sg;
1383 }
1384
1385 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
1386 out_sg, out_num, virtio_net_tx_complete);
1387 if (ret == 0) {
1388 virtio_queue_set_notification(q->tx_vq, 0);
1389 q->async_tx.elem = elem;
1390 return -EBUSY;
1391 }
1392
1393 drop:
1394 virtqueue_push(q->tx_vq, elem, 0);
1395 virtio_notify(vdev, q->tx_vq);
1396 g_free(elem);
1397
1398 if (++num_packets >= n->tx_burst) {
1399 break;
1400 }
1401 }
1402 return num_packets;
1403 }
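/*
 * When guest_hdr_len != host_hdr_len (e.g. the guest negotiated
 * mergeable rx buffers but the backend only takes the 10-byte header),
 * the iov_copy() pair above rebuilds the sg list as the host header
 * bytes followed by the payload, skipping the guest-only header tail.
 */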
1404
1405 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
1406 {
1407 VirtIONet *n = VIRTIO_NET(vdev);
1408 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1409
1410 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1411 virtio_net_drop_tx_queue_data(vdev, vq);
1412 return;
1413 }
1414
1415 /* This happens when device was stopped but VCPU wasn't. */
1416 if (!vdev->vm_running) {
1417 q->tx_waiting = 1;
1418 return;
1419 }
1420
1421 if (q->tx_waiting) {
1422 virtio_queue_set_notification(vq, 1);
1423 timer_del(q->tx_timer);
1424 q->tx_waiting = 0;
1425 if (virtio_net_flush_tx(q) == -EINVAL) {
1426 return;
1427 }
1428 } else {
1429 timer_mod(q->tx_timer,
1430 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
1431 q->tx_waiting = 1;
1432 virtio_queue_set_notification(vq, 0);
1433 }
1434 }
1435
1436 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
1437 {
1438 VirtIONet *n = VIRTIO_NET(vdev);
1439 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1440
1441 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1442 virtio_net_drop_tx_queue_data(vdev, vq);
1443 return;
1444 }
1445
1446 if (unlikely(q->tx_waiting)) {
1447 return;
1448 }
1449 q->tx_waiting = 1;
1450 /* This happens when device was stopped but VCPU wasn't. */
1451 if (!vdev->vm_running) {
1452 return;
1453 }
1454 virtio_queue_set_notification(vq, 0);
1455 qemu_bh_schedule(q->tx_bh);
1456 }
1457
1458 static void virtio_net_tx_timer(void *opaque)
1459 {
1460 VirtIONetQueue *q = opaque;
1461 VirtIONet *n = q->n;
1462 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1463 /* This happens when device was stopped but the timer wasn't. */
1464 if (!vdev->vm_running) {
1465 /* Make sure tx waiting is set, so we'll run when restarted. */
1466 assert(q->tx_waiting);
1467 return;
1468 }
1469
1470 q->tx_waiting = 0;
1471
1472 /* Just in case the driver is not ready anymore */
1473 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1474 return;
1475 }
1476
1477 virtio_queue_set_notification(q->tx_vq, 1);
1478 virtio_net_flush_tx(q);
1479 }
1480
1481 static void virtio_net_tx_bh(void *opaque)
1482 {
1483 VirtIONetQueue *q = opaque;
1484 VirtIONet *n = q->n;
1485 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1486 int32_t ret;
1487
1488 /* This happens when device was stopped but BH wasn't. */
1489 if (!vdev->vm_running) {
1490 /* Make sure tx waiting is set, so we'll run when restarted. */
1491 assert(q->tx_waiting);
1492 return;
1493 }
1494
1495 q->tx_waiting = 0;
1496
1497 /* Just in case the driver is not ready anymore */
1498 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
1499 return;
1500 }
1501
1502 ret = virtio_net_flush_tx(q);
1503 if (ret == -EBUSY || ret == -EINVAL) {
1504 return; /* Notification re-enable handled by tx_complete or device
1505 * broken */
1506 }
1507
1508 /* If we flush a full burst of packets, assume there are
1509 * more coming and immediately reschedule */
1510 if (ret >= n->tx_burst) {
1511 qemu_bh_schedule(q->tx_bh);
1512 q->tx_waiting = 1;
1513 return;
1514 }
1515
1516 /* If less than a full burst, re-enable notification and flush
1517 * anything that may have come in while we weren't looking. If
1518 * we find something, assume the guest is still active and reschedule */
1519 virtio_queue_set_notification(q->tx_vq, 1);
1520 ret = virtio_net_flush_tx(q);
1521 if (ret == -EINVAL) {
1522 return;
1523 } else if (ret > 0) {
1524 virtio_queue_set_notification(q->tx_vq, 0);
1525 qemu_bh_schedule(q->tx_bh);
1526 q->tx_waiting = 1;
1527 }
1528 }
1529
1530 static void virtio_net_add_queue(VirtIONet *n, int index)
1531 {
1532 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1533
1534 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
1535 virtio_net_handle_rx);
1536
1537 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
1538 n->vqs[index].tx_vq =
1539 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
1540 virtio_net_handle_tx_timer);
1541 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1542 virtio_net_tx_timer,
1543 &n->vqs[index]);
1544 } else {
1545 n->vqs[index].tx_vq =
1546 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
1547 virtio_net_handle_tx_bh);
1548 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
1549 }
1550
1551 n->vqs[index].tx_waiting = 0;
1552 n->vqs[index].n = n;
1553 }
1554
1555 static void virtio_net_del_queue(VirtIONet *n, int index)
1556 {
1557 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1558 VirtIONetQueue *q = &n->vqs[index];
1559 NetClientState *nc = qemu_get_subqueue(n->nic, index);
1560
1561 qemu_purge_queued_packets(nc);
1562
1563 virtio_del_queue(vdev, index * 2);
1564 if (q->tx_timer) {
1565 timer_del(q->tx_timer);
1566 timer_free(q->tx_timer);
1567 q->tx_timer = NULL;
1568 } else {
1569 qemu_bh_delete(q->tx_bh);
1570 q->tx_bh = NULL;
1571 }
1572 q->tx_waiting = 0;
1573 virtio_del_queue(vdev, index * 2 + 1);
1574 }
1575
1576 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
1577 {
1578 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1579 int old_num_queues = virtio_get_num_queues(vdev);
1580 int new_num_queues = new_max_queues * 2 + 1;
1581 int i;
1582
1583 assert(old_num_queues >= 3);
1584 assert(old_num_queues % 2 == 1);
1585
1586 if (old_num_queues == new_num_queues) {
1587 return;
1588 }
1589
1590 /*
1591 * We always need to remove and add ctrl vq if
1592 * old_num_queues != new_num_queues. Remove ctrl_vq first,
1593 * and then we only enter one of the following two loops.
1594 */
1595 virtio_del_queue(vdev, old_num_queues - 1);
1596
1597 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
1598 /* new_num_queues < old_num_queues */
1599 virtio_net_del_queue(n, i / 2);
1600 }
1601
1602 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
1603 /* new_num_queues > old_num_queues */
1604 virtio_net_add_queue(n, i / 2);
1605 }
1606
1607 /* add ctrl_vq last */
1608 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1609 }
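/*
 * Example: growing from 2 to 3 queue pairs means going from 5
 * virtqueues (rx0 tx0 rx1 tx1 ctrl) to 7 (rx0 tx0 rx1 tx1 rx2 tx2
 * ctrl): the ctrl vq at index 4 is deleted, pair 2 is added at
 * indices 4/5, and ctrl is re-added at index 6.
 */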
1610
1611 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
1612 {
1613 int max = multiqueue ? n->max_queues : 1;
1614
1615 n->multiqueue = multiqueue;
1616 virtio_net_change_num_queues(n, max);
1617
1618 virtio_net_set_queues(n);
1619 }
1620
1621 static int virtio_net_post_load_device(void *opaque, int version_id)
1622 {
1623 VirtIONet *n = opaque;
1624 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1625 int i, link_down;
1626
1627 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
1628 virtio_vdev_has_feature(vdev,
1629 VIRTIO_F_VERSION_1));
1630
1631 /* MAC_TABLE_ENTRIES may be different from the saved image */
1632 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
1633 n->mac_table.in_use = 0;
1634 }
1635
1636 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
1637 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
1638 }
1639
1640 if (peer_has_vnet_hdr(n)) {
1641 virtio_net_apply_guest_offloads(n);
1642 }
1643
1644 virtio_net_set_queues(n);
1645
1646 /* Find the first multicast entry in the saved MAC filter */
1647 for (i = 0; i < n->mac_table.in_use; i++) {
1648 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
1649 break;
1650 }
1651 }
1652 n->mac_table.first_multi = i;
1653
1654 /* nc.link_down can't be migrated, so infer link_down according
1655 * to link status bit in n->status */
1656 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
1657 for (i = 0; i < n->max_queues; i++) {
1658 qemu_get_subqueue(n->nic, i)->link_down = link_down;
1659 }
1660
1661 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
1662 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
1663 n->announce_counter = SELF_ANNOUNCE_ROUNDS;
1664 timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
1665 }
1666
1667 return 0;
1668 }
1669
1670 /* tx_waiting field of a VirtIONetQueue */
1671 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
1672 .name = "virtio-net-queue-tx_waiting",
1673 .fields = (VMStateField[]) {
1674 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
1675 VMSTATE_END_OF_LIST()
1676 },
1677 };
1678
1679 static bool max_queues_gt_1(void *opaque, int version_id)
1680 {
1681 return VIRTIO_NET(opaque)->max_queues > 1;
1682 }
1683
1684 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
1685 {
1686 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
1687 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
1688 }
1689
1690 static bool mac_table_fits(void *opaque, int version_id)
1691 {
1692 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
1693 }
1694
1695 static bool mac_table_doesnt_fit(void *opaque, int version_id)
1696 {
1697 return !mac_table_fits(opaque, version_id);
1698 }
1699
1700 /* This temporary type is shared by all the WITH_TMP methods
1701 * although only some fields are used by each.
1702 */
1703 struct VirtIONetMigTmp {
1704 VirtIONet *parent;
1705 VirtIONetQueue *vqs_1;
1706 uint16_t curr_queues_1;
1707 uint8_t has_ufo;
1708 uint32_t has_vnet_hdr;
1709 };
1710
1711 /* The 2nd and subsequent tx_waiting flags are loaded later than
1712 * the 1st entry in the queues and only if there's more than one
1713 * entry. We use the tmp mechanism to calculate a temporary
1714 * pointer and count and also validate the count.
1715 */
1716
1717 static int virtio_net_tx_waiting_pre_save(void *opaque)
1718 {
1719 struct VirtIONetMigTmp *tmp = opaque;
1720
1721 tmp->vqs_1 = tmp->parent->vqs + 1;
1722 tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
1723 if (tmp->parent->curr_queues == 0) {
1724 tmp->curr_queues_1 = 0;
1725 }
1726
1727 return 0;
1728 }
1729
1730 static int virtio_net_tx_waiting_pre_load(void *opaque)
1731 {
1732 struct VirtIONetMigTmp *tmp = opaque;
1733
1734 /* Reuse the pointer setup from save */
1735 virtio_net_tx_waiting_pre_save(opaque);
1736
1737 if (tmp->parent->curr_queues > tmp->parent->max_queues) {
1738 error_report("virtio-net: curr_queues %x > max_queues %x",
1739 tmp->parent->curr_queues, tmp->parent->max_queues);
1740
1741 return -EINVAL;
1742 }
1743
1744 return 0; /* all good */
1745 }
1746
1747 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
1748 .name = "virtio-net-tx_waiting",
1749 .pre_load = virtio_net_tx_waiting_pre_load,
1750 .pre_save = virtio_net_tx_waiting_pre_save,
1751 .fields = (VMStateField[]) {
1752 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
1753 curr_queues_1,
1754 vmstate_virtio_net_queue_tx_waiting,
1755 struct VirtIONetQueue),
1756 VMSTATE_END_OF_LIST()
1757 },
1758 };
1759
1760 /* the 'has_ufo' flag is just tested; if the incoming stream has the
1761 * flag set we need to check that we have it
1762 */
1763 static int virtio_net_ufo_post_load(void *opaque, int version_id)
1764 {
1765 struct VirtIONetMigTmp *tmp = opaque;
1766
1767 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
1768 error_report("virtio-net: saved image requires TUN_F_UFO support");
1769 return -EINVAL;
1770 }
1771
1772 return 0;
1773 }
1774
1775 static int virtio_net_ufo_pre_save(void *opaque)
1776 {
1777 struct VirtIONetMigTmp *tmp = opaque;
1778
1779 tmp->has_ufo = tmp->parent->has_ufo;
1780
1781 return 0;
1782 }
1783
1784 static const VMStateDescription vmstate_virtio_net_has_ufo = {
1785 .name = "virtio-net-ufo",
1786 .post_load = virtio_net_ufo_post_load,
1787 .pre_save = virtio_net_ufo_pre_save,
1788 .fields = (VMStateField[]) {
1789 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
1790 VMSTATE_END_OF_LIST()
1791 },
1792 };
1793
1794 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
1795 * flag set we need to check that we have it
1796 */
1797 static int virtio_net_vnet_post_load(void *opaque, int version_id)
1798 {
1799 struct VirtIONetMigTmp *tmp = opaque;
1800
1801 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
1802 error_report("virtio-net: saved image requires vnet_hdr=on");
1803 return -EINVAL;
1804 }
1805
1806 return 0;
1807 }
1808
1809 static int virtio_net_vnet_pre_save(void *opaque)
1810 {
1811 struct VirtIONetMigTmp *tmp = opaque;
1812
1813 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
1814
1815 return 0;
1816 }
1817
1818 static const VMStateDescription vmstate_virtio_net_has_vnet = {
1819 .name = "virtio-net-vnet",
1820 .post_load = virtio_net_vnet_post_load,
1821 .pre_save = virtio_net_vnet_pre_save,
1822 .fields = (VMStateField[]) {
1823 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
1824 VMSTATE_END_OF_LIST()
1825 },
1826 };
1827
1828 static const VMStateDescription vmstate_virtio_net_device = {
1829 .name = "virtio-net-device",
1830 .version_id = VIRTIO_NET_VM_VERSION,
1831 .minimum_version_id = VIRTIO_NET_VM_VERSION,
1832 .post_load = virtio_net_post_load_device,
1833 .fields = (VMStateField[]) {
1834 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
1835 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
1836 vmstate_virtio_net_queue_tx_waiting,
1837 VirtIONetQueue),
1838 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
1839 VMSTATE_UINT16(status, VirtIONet),
1840 VMSTATE_UINT8(promisc, VirtIONet),
1841 VMSTATE_UINT8(allmulti, VirtIONet),
1842 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
1843
1844 /* Guarded pair: If it fits we load it, else we throw it away
1845 * - can happen if source has a larger MAC table; post-load
1846 * sets flags in this case.
1847 */
1848 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
1849 0, mac_table_fits, mac_table.in_use,
1850 ETH_ALEN),
1851 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
1852 mac_table.in_use, ETH_ALEN),
1853
1854 /* Note: This is an array of uint32's that's always been saved as a
1855 * buffer; hold onto your endiannesses; it's actually used as a bitmap
1856 * but based on the uint.
1857 */
1858 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
1859 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1860 vmstate_virtio_net_has_vnet),
1861 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
1862 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
1863 VMSTATE_UINT8(alluni, VirtIONet),
1864 VMSTATE_UINT8(nomulti, VirtIONet),
1865 VMSTATE_UINT8(nouni, VirtIONet),
1866 VMSTATE_UINT8(nobcast, VirtIONet),
1867 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1868 vmstate_virtio_net_has_ufo),
1869 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
1870 vmstate_info_uint16_equal, uint16_t),
1871 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
1872 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1873 vmstate_virtio_net_tx_waiting),
1874 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
1875 has_ctrl_guest_offloads),
1876 VMSTATE_END_OF_LIST()
1877 },
1878 };
1879
1880 static NetClientInfo net_virtio_info = {
1881 .type = NET_CLIENT_DRIVER_NIC,
1882 .size = sizeof(NICState),
1883 .can_receive = virtio_net_can_receive,
1884 .receive = virtio_net_receive,
1885 .link_status_changed = virtio_net_set_link_status,
1886 .query_rx_filter = virtio_net_query_rxfilter,
1887 };
1888
1889 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
1890 {
1891 VirtIONet *n = VIRTIO_NET(vdev);
1892 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1893 assert(n->vhost_started);
1894 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
1895 }
1896
1897 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
1898 bool mask)
1899 {
1900 VirtIONet *n = VIRTIO_NET(vdev);
1901 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1902 assert(n->vhost_started);
1903 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
1904 vdev, idx, mask);
1905 }
1906
1907 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
1908 {
1909 int i, config_size = 0;
1910 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
1911
1912 for (i = 0; feature_sizes[i].flags != 0; i++) {
1913 if (host_features & feature_sizes[i].flags) {
1914 config_size = MAX(feature_sizes[i].end, config_size);
1915 }
1916 }
1917 n->config_size = config_size;
1918 }
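/*
 * config_size ends at the last config field whose feature is offered,
 * per the feature_sizes[] table: e.g. with MAC and STATUS but neither
 * MQ nor MTU, config_size == endof(struct virtio_net_config, status)
 * == 8 bytes.
 */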
1919
1920 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
1921 const char *type)
1922 {
1923 /*
1924 * The name can be NULL; in that case the netclient name will be type.x.
1925 */
1926 assert(type != NULL);
1927
1928 g_free(n->netclient_name);
1929 g_free(n->netclient_type);
1930 n->netclient_name = g_strdup(name);
1931 n->netclient_type = g_strdup(type);
1932 }
1933
1934 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
1935 {
1936 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1937 VirtIONet *n = VIRTIO_NET(dev);
1938 NetClientState *nc;
1939 int i;
1940
1941 if (n->net_conf.mtu) {
1942 n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
1943 }
1944
1945 virtio_net_set_config_size(n, n->host_features);
1946 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
1947
1948 /*
1949 * We set a lower limit on RX queue size to what it always was.
1950 * Guests that want a smaller ring can always resize it without
1951 * help from us (using virtio 1 and up).
1952 */
1953 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
1954 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
1955 !is_power_of_2(n->net_conf.rx_queue_size)) {
1956 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
1957 "must be a power of 2 between %d and %d.",
1958 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
1959 VIRTQUEUE_MAX_SIZE);
1960 virtio_cleanup(vdev);
1961 return;
1962 }
1963
1964 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
1965 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
1966 !is_power_of_2(n->net_conf.tx_queue_size)) {
1967 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
1968 "must be a power of 2 between %d and %d",
1969 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
1970 VIRTQUEUE_MAX_SIZE);
1971 virtio_cleanup(vdev);
1972 return;
1973 }
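
/*
 * Concretely, both ring sizes must be a power of two between the default
 * minimum and VIRTQUEUE_MAX_SIZE. Assuming the usual VIRTQUEUE_MAX_SIZE
 * of 1024 and the minimum of 256, the accepted values are exactly 256,
 * 512 and 1024: rx_queue_size=512 passes, while 300 (not a power of two)
 * and 128 (below the minimum) take the error paths above.
 */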
1974
1975 n->max_queues = MAX(n->nic_conf.peers.queues, 1);
1976 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
1977 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
1978 "must be a positive integer less than %d.",
1979 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
1980 virtio_cleanup(vdev);
1981 return;
1982 }
1983 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
1984 n->curr_queues = 1;
1985 n->tx_timeout = n->net_conf.txtimer;
1986
1987 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
1988 && strcmp(n->net_conf.tx, "bh")) {
1989 error_report("virtio-net: "
1990 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
1991 n->net_conf.tx);
1992 error_report("Defaulting to \"bh\"");
1993 }
1994
1995 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
1996 n->net_conf.tx_queue_size);
1997
1998 for (i = 0; i < n->max_queues; i++) {
1999 virtio_net_add_queue(n, i);
2000 }
2001
2002 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2003 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
2004 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
2005 n->status = VIRTIO_NET_S_LINK_UP;
2006 n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
2007 virtio_net_announce_timer, n);
2008
2009 if (n->netclient_type) {
2010 /*
2011 * This happens when virtio_net_set_netclient_name has been called.
2012 */
2013 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2014 n->netclient_type, n->netclient_name, n);
2015 } else {
2016 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2017 object_get_typename(OBJECT(dev)), dev->id, n);
2018 }
2019
2020 peer_test_vnet_hdr(n);
2021 if (peer_has_vnet_hdr(n)) {
2022 for (i = 0; i < n->max_queues; i++) {
2023 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
2024 }
2025 n->host_hdr_len = sizeof(struct virtio_net_hdr);
2026 } else {
2027 n->host_hdr_len = 0;
2028 }
2029
2030 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
2031
2032 n->vqs[0].tx_waiting = 0;
2033 n->tx_burst = n->net_conf.txburst;
2034 virtio_net_set_mrg_rx_bufs(n, 0, 0);
2035 n->promisc = 1; /* for compatibility */
2036
2037 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
2038
2039 n->vlans = g_malloc0(MAX_VLAN >> 3);
2040
2041 nc = qemu_get_queue(n->nic);
2042 nc->rxfilter_notify_enabled = 1;
2043
2044 n->qdev = dev;
2045 }
2046
2047 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
2048 {
2049 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2050 VirtIONet *n = VIRTIO_NET(dev);
2051 int i, max_queues;
2052
2053 /* This will stop vhost backend if appropriate. */
2054 virtio_net_set_status(vdev, 0);
2055
2056 g_free(n->netclient_name);
2057 n->netclient_name = NULL;
2058 g_free(n->netclient_type);
2059 n->netclient_type = NULL;
2060
2061 g_free(n->mac_table.macs);
2062 g_free(n->vlans);
2063
2064 max_queues = n->multiqueue ? n->max_queues : 1;
2065 for (i = 0; i < max_queues; i++) {
2066 virtio_net_del_queue(n, i);
2067 }
2068
2069 timer_del(n->announce_timer);
2070 timer_free(n->announce_timer);
2071 g_free(n->vqs);
2072 qemu_del_nic(n->nic);
2073 virtio_cleanup(vdev);
2074 }
2075
2076 static void virtio_net_instance_init(Object *obj)
2077 {
2078 VirtIONet *n = VIRTIO_NET(obj);
2079
2080 /*
2081 * The default config_size is sizeof(struct virtio_net_config).
2082 * It can be overridden with virtio_net_set_config_size().
2083 */
2084 n->config_size = sizeof(struct virtio_net_config);
2085 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2086 "bootindex", "/ethernet-phy@0",
2087 DEVICE(n), NULL);
2088 }
2089
2090 static int virtio_net_pre_save(void *opaque)
2091 {
2092 VirtIONet *n = opaque;
2093
2094 /* At this point the backend must be stopped; otherwise
2095 * it might keep writing to memory. */
2096 assert(!n->vhost_started);
2097
2098 return 0;
2099 }
2100
2101 static const VMStateDescription vmstate_virtio_net = {
2102 .name = "virtio-net",
2103 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2104 .version_id = VIRTIO_NET_VM_VERSION,
2105 .fields = (VMStateField[]) {
2106 VMSTATE_VIRTIO_DEVICE,
2107 VMSTATE_END_OF_LIST()
2108 },
2109 .pre_save = virtio_net_pre_save,
2110 };
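
/*
 * Note the two-level layout: this wrapper carries only
 * VMSTATE_VIRTIO_DEVICE, which defers to the common virtio save/load
 * path; that path in turn serializes the device-specific state through
 * the vmsd installed on VirtioDeviceClass below
 * (vmstate_virtio_net_device). pre_save runs on this outer wrapper
 * before anything is serialized, which is why the vhost-stopped
 * assertion sits in virtio_net_pre_save above.
 */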
2111
2112 static Property virtio_net_properties[] = {
2113 DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
2114 DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
2115 VIRTIO_NET_F_GUEST_CSUM, true),
2116 DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2117 DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
2118 VIRTIO_NET_F_GUEST_TSO4, true),
2119 DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
2120 VIRTIO_NET_F_GUEST_TSO6, true),
2121 DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
2122 VIRTIO_NET_F_GUEST_ECN, true),
2123 DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
2124 VIRTIO_NET_F_GUEST_UFO, true),
2125 DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
2126 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
2127 DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
2128 VIRTIO_NET_F_HOST_TSO4, true),
2129 DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
2130 VIRTIO_NET_F_HOST_TSO6, true),
2131 DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
2132 VIRTIO_NET_F_HOST_ECN, true),
2133 DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
2134 VIRTIO_NET_F_HOST_UFO, true),
2135 DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
2136 VIRTIO_NET_F_MRG_RXBUF, true),
2137 DEFINE_PROP_BIT("status", VirtIONet, host_features,
2138 VIRTIO_NET_F_STATUS, true),
2139 DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
2140 VIRTIO_NET_F_CTRL_VQ, true),
2141 DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
2142 VIRTIO_NET_F_CTRL_RX, true),
2143 DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
2144 VIRTIO_NET_F_CTRL_VLAN, true),
2145 DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
2146 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
2147 DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
2148 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
2149 DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
2150 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
2151 DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2152 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2153 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
2154 TX_TIMER_INTERVAL),
2155 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2156 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
2157 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2158 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
2159 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
2160 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
2161 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
2162 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
2163 true),
2164 DEFINE_PROP_END_OF_LIST(),
2165 };
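
/*
 * The properties above surface directly on the command line. A hedged
 * example (virtio-net-pci is the usual transport wrapper, defined
 * elsewhere):
 *
 *     -device virtio-net-pci,netdev=nd0,mq=on,rx_queue_size=512,host_mtu=1500
 *
 * Feature bits default as declared here (e.g. mq is off unless enabled),
 * and the x-prefixed entries are experimental knobs subject to change.
 */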
2166
2167 static void virtio_net_class_init(ObjectClass *klass, void *data)
2168 {
2169 DeviceClass *dc = DEVICE_CLASS(klass);
2170 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2171
2172 dc->props = virtio_net_properties;
2173 dc->vmsd = &vmstate_virtio_net;
2174 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2175 vdc->realize = virtio_net_device_realize;
2176 vdc->unrealize = virtio_net_device_unrealize;
2177 vdc->get_config = virtio_net_get_config;
2178 vdc->set_config = virtio_net_set_config;
2179 vdc->get_features = virtio_net_get_features;
2180 vdc->set_features = virtio_net_set_features;
2181 vdc->bad_features = virtio_net_bad_features;
2182 vdc->reset = virtio_net_reset;
2183 vdc->set_status = virtio_net_set_status;
2184 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2185 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2186 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
2187 vdc->vmsd = &vmstate_virtio_net_device;
2188 }
2189
2190 static const TypeInfo virtio_net_info = {
2191 .name = TYPE_VIRTIO_NET,
2192 .parent = TYPE_VIRTIO_DEVICE,
2193 .instance_size = sizeof(VirtIONet),
2194 .instance_init = virtio_net_instance_init,
2195 .class_init = virtio_net_class_init,
2196 };
2197
2198 static void virtio_register_types(void)
2199 {
2200 type_register_static(&virtio_net_info);
2201 }
2202
2203 type_init(virtio_register_types)