1 /*
2 * Virtio Network Device
3 *
4 * Copyright IBM, Corp. 2007
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/iov.h"
16 #include "hw/virtio/virtio.h"
17 #include "net/net.h"
18 #include "net/checksum.h"
19 #include "net/tap.h"
20 #include "qemu/error-report.h"
21 #include "qemu/timer.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "net/vhost_net.h"
24 #include "hw/virtio/virtio-bus.h"
25 #include "qapi/qmp/qjson.h"
26 #include "qapi-event.h"
27 #include "hw/virtio/virtio-access.h"
28
29 #define VIRTIO_NET_VM_VERSION 11
30
31 #define MAC_TABLE_ENTRIES 64
32 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
33
34 /* previously fixed value */
35 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
36 /* for now, only allow larger queues; with virtio-1, guest can downsize */
37 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
38
39 /*
40 * Calculate the number of bytes up to and including the given 'field' of
41 * 'container'.
42 */
43 #define endof(container, field) \
44 (offsetof(container, field) + sizeof(((container *)0)->field))
45
46 typedef struct VirtIOFeature {
47 uint32_t flags;
48 size_t end;
49 } VirtIOFeature;
50
51 static VirtIOFeature feature_sizes[] = {
52 {.flags = 1 << VIRTIO_NET_F_MAC,
53 .end = endof(struct virtio_net_config, mac)},
54 {.flags = 1 << VIRTIO_NET_F_STATUS,
55 .end = endof(struct virtio_net_config, status)},
56 {.flags = 1 << VIRTIO_NET_F_MQ,
57 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
58 {.flags = 1 << VIRTIO_NET_F_MTU,
59 .end = endof(struct virtio_net_config, mtu)},
60 {}
61 };
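
/*
 * A minimal sketch (not part of the original file) of how this table is
 * consumed: the visible config space ends at the largest 'end' offset whose
 * feature bit is set in the host feature mask. This mirrors
 * virtio_net_set_config_size() further down; the demo_ name is illustrative.
 */
static inline size_t demo_config_size(uint64_t host_features)
{
    size_t config_size = 0;
    int i;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    return config_size;
}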
62
63 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
64 {
65 VirtIONet *n = qemu_get_nic_opaque(nc);
66
67 return &n->vqs[nc->queue_index];
68 }
69
70 static int vq2q(int queue_index)
71 {
72 return queue_index / 2;
73 }
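
/*
 * A sketch of the virtqueue layout vq2q() assumes (demo_ names are
 * illustrative): for queue pair i, the RX queue is virtqueue 2*i and the
 * TX queue is 2*i + 1; the control queue, when present, is the last one
 * (see virtio_net_change_num_queues() below).
 */
static inline int demo_rx_vq_index(int pair) { return pair * 2; }
static inline int demo_tx_vq_index(int pair) { return pair * 2 + 1; }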
74
75 /* TODO
76 * - we could suppress RX interrupt if we were so inclined.
77 */
78
79 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
80 {
81 VirtIONet *n = VIRTIO_NET(vdev);
82 struct virtio_net_config netcfg;
83
84 virtio_stw_p(vdev, &netcfg.status, n->status);
85 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
86 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
87 memcpy(netcfg.mac, n->mac, ETH_ALEN);
88 memcpy(config, &netcfg, n->config_size);
89 }
90
91 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
92 {
93 VirtIONet *n = VIRTIO_NET(vdev);
94 struct virtio_net_config netcfg = {};
95
96 memcpy(&netcfg, config, n->config_size);
97
98 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
99 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
100 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
101 memcpy(n->mac, netcfg.mac, ETH_ALEN);
102 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
103 }
104 }
105
106 static bool virtio_net_started(VirtIONet *n, uint8_t status)
107 {
108 VirtIODevice *vdev = VIRTIO_DEVICE(n);
109 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
110 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
111 }
112
113 static void virtio_net_announce_timer(void *opaque)
114 {
115 VirtIONet *n = opaque;
116 VirtIODevice *vdev = VIRTIO_DEVICE(n);
117
118 n->announce_counter--;
119 n->status |= VIRTIO_NET_S_ANNOUNCE;
120 virtio_notify_config(vdev);
121 }
122
123 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
124 {
125 VirtIODevice *vdev = VIRTIO_DEVICE(n);
126 NetClientState *nc = qemu_get_queue(n->nic);
127 int queues = n->multiqueue ? n->max_queues : 1;
128
129 if (!get_vhost_net(nc->peer)) {
130 return;
131 }
132
133 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
134 !!n->vhost_started) {
135 return;
136 }
137 if (!n->vhost_started) {
138 int r, i;
139
140 if (n->needs_vnet_hdr_swap) {
141 error_report("backend does not support %s vnet headers; "
142 "falling back on userspace virtio",
143 virtio_is_big_endian(vdev) ? "BE" : "LE");
144 return;
145 }
146
147 /* Any packets outstanding? Purge them to avoid touching rings
148 * when vhost is running.
149 */
150 for (i = 0; i < queues; i++) {
151 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
152
153 /* Purge both directions: TX and RX. */
154 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
155 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
156 }
157
158 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
159 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
160 if (r < 0) {
161 error_report("%uBytes MTU not supported by the backend",
162 n->net_conf.mtu);
163
164 return;
165 }
166 }
167
168 n->vhost_started = 1;
169 r = vhost_net_start(vdev, n->nic->ncs, queues);
170 if (r < 0) {
171 error_report("unable to start vhost net: %d: "
172 "falling back on userspace virtio", -r);
173 n->vhost_started = 0;
174 }
175 } else {
176 vhost_net_stop(vdev, n->nic->ncs, queues);
177 n->vhost_started = 0;
178 }
179 }
180
181 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
182 NetClientState *peer,
183 bool enable)
184 {
185 if (virtio_is_big_endian(vdev)) {
186 return qemu_set_vnet_be(peer, enable);
187 } else {
188 return qemu_set_vnet_le(peer, enable);
189 }
190 }
191
192 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
193 int queues, bool enable)
194 {
195 int i;
196
197 for (i = 0; i < queues; i++) {
198 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
199 enable) {
200 while (--i >= 0) {
201 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
202 }
203
204 return true;
205 }
206 }
207
208 return false;
209 }
210
211 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
212 {
213 VirtIODevice *vdev = VIRTIO_DEVICE(n);
214 int queues = n->multiqueue ? n->max_queues : 1;
215
216 if (virtio_net_started(n, status)) {
217 /* Before using the device, we tell the network backend about the
218 * endianness to use when parsing vnet headers. If the backend
219          * can't do it, we fall back on fixing the headers in the core
220 * virtio-net code.
221 */
222 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
223 queues, true);
224 } else if (virtio_net_started(n, vdev->status)) {
225 /* After using the device, we need to reset the network backend to
226 * the default (guest native endianness), otherwise the guest may
227 * lose network connectivity if it is rebooted into a different
228 * endianness.
229 */
230 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
231 }
232 }
233
234 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
235 {
236 unsigned int dropped = virtqueue_drop_all(vq);
237 if (dropped) {
238 virtio_notify(vdev, vq);
239 }
240 }
241
242 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
243 {
244 VirtIONet *n = VIRTIO_NET(vdev);
245 VirtIONetQueue *q;
246 int i;
247 uint8_t queue_status;
248
249 virtio_net_vnet_endian_status(n, status);
250 virtio_net_vhost_status(n, status);
251
252 for (i = 0; i < n->max_queues; i++) {
253 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
254 bool queue_started;
255 q = &n->vqs[i];
256
257 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
258 queue_status = 0;
259 } else {
260 queue_status = status;
261 }
262 queue_started =
263 virtio_net_started(n, queue_status) && !n->vhost_started;
264
265 if (queue_started) {
266 qemu_flush_queued_packets(ncs);
267 }
268
269 if (!q->tx_waiting) {
270 continue;
271 }
272
273 if (queue_started) {
274 if (q->tx_timer) {
275 timer_mod(q->tx_timer,
276 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
277 } else {
278 qemu_bh_schedule(q->tx_bh);
279 }
280 } else {
281 if (q->tx_timer) {
282 timer_del(q->tx_timer);
283 } else {
284 qemu_bh_cancel(q->tx_bh);
285 }
286 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
287 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
288                 /* If tx is waiting, we likely have some packets in the tx
289                  * queue and have disabled notification */
290 q->tx_waiting = 0;
291 virtio_queue_set_notification(q->tx_vq, 1);
292 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
293 }
294 }
295 }
296 }
297
298 static void virtio_net_set_link_status(NetClientState *nc)
299 {
300 VirtIONet *n = qemu_get_nic_opaque(nc);
301 VirtIODevice *vdev = VIRTIO_DEVICE(n);
302 uint16_t old_status = n->status;
303
304 if (nc->link_down)
305 n->status &= ~VIRTIO_NET_S_LINK_UP;
306 else
307 n->status |= VIRTIO_NET_S_LINK_UP;
308
309 if (n->status != old_status)
310 virtio_notify_config(vdev);
311
312 virtio_net_set_status(vdev, vdev->status);
313 }
314
315 static void rxfilter_notify(NetClientState *nc)
316 {
317 VirtIONet *n = qemu_get_nic_opaque(nc);
318
319 if (nc->rxfilter_notify_enabled) {
320 gchar *path = object_get_canonical_path(OBJECT(n->qdev));
321 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
322 n->netclient_name, path, &error_abort);
323 g_free(path);
324
325         /* disable event notification to avoid event flooding */
326 nc->rxfilter_notify_enabled = 0;
327 }
328 }
329
330 static intList *get_vlan_table(VirtIONet *n)
331 {
332 intList *list, *entry;
333 int i, j;
334
335 list = NULL;
336 for (i = 0; i < MAX_VLAN >> 5; i++) {
337 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
338 if (n->vlans[i] & (1U << j)) {
339 entry = g_malloc0(sizeof(*entry));
340 entry->value = (i << 5) + j;
341 entry->next = list;
342 list = entry;
343 }
344 }
345 }
346
347 return list;
348 }
349
350 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
351 {
352 VirtIONet *n = qemu_get_nic_opaque(nc);
353 VirtIODevice *vdev = VIRTIO_DEVICE(n);
354 RxFilterInfo *info;
355 strList *str_list, *entry;
356 int i;
357
358 info = g_malloc0(sizeof(*info));
359 info->name = g_strdup(nc->name);
360 info->promiscuous = n->promisc;
361
362 if (n->nouni) {
363 info->unicast = RX_STATE_NONE;
364 } else if (n->alluni) {
365 info->unicast = RX_STATE_ALL;
366 } else {
367 info->unicast = RX_STATE_NORMAL;
368 }
369
370 if (n->nomulti) {
371 info->multicast = RX_STATE_NONE;
372 } else if (n->allmulti) {
373 info->multicast = RX_STATE_ALL;
374 } else {
375 info->multicast = RX_STATE_NORMAL;
376 }
377
378     info->broadcast_allowed = !n->nobcast; /* nobcast means broadcast is filtered */
379 info->multicast_overflow = n->mac_table.multi_overflow;
380 info->unicast_overflow = n->mac_table.uni_overflow;
381
382 info->main_mac = qemu_mac_strdup_printf(n->mac);
383
384 str_list = NULL;
385 for (i = 0; i < n->mac_table.first_multi; i++) {
386 entry = g_malloc0(sizeof(*entry));
387 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
388 entry->next = str_list;
389 str_list = entry;
390 }
391 info->unicast_table = str_list;
392
393 str_list = NULL;
394 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
395 entry = g_malloc0(sizeof(*entry));
396 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
397 entry->next = str_list;
398 str_list = entry;
399 }
400 info->multicast_table = str_list;
401 info->vlan_table = get_vlan_table(n);
402
403 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
404 info->vlan = RX_STATE_ALL;
405 } else if (!info->vlan_table) {
406 info->vlan = RX_STATE_NONE;
407 } else {
408 info->vlan = RX_STATE_NORMAL;
409 }
410
411 /* enable event notification after query */
412 nc->rxfilter_notify_enabled = 1;
413
414 return info;
415 }
416
417 static void virtio_net_reset(VirtIODevice *vdev)
418 {
419 VirtIONet *n = VIRTIO_NET(vdev);
420
421 /* Reset back to compatibility mode */
422 n->promisc = 1;
423 n->allmulti = 0;
424 n->alluni = 0;
425 n->nomulti = 0;
426 n->nouni = 0;
427 n->nobcast = 0;
428 /* multiqueue is disabled by default */
429 n->curr_queues = 1;
430 timer_del(n->announce_timer);
431 n->announce_counter = 0;
432 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
433
434 /* Flush any MAC and VLAN filter table state */
435 n->mac_table.in_use = 0;
436 n->mac_table.first_multi = 0;
437 n->mac_table.multi_overflow = 0;
438 n->mac_table.uni_overflow = 0;
439 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
440 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
441 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
442 memset(n->vlans, 0, MAX_VLAN >> 3);
443 }
444
445 static void peer_test_vnet_hdr(VirtIONet *n)
446 {
447 NetClientState *nc = qemu_get_queue(n->nic);
448 if (!nc->peer) {
449 return;
450 }
451
452 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
453 }
454
455 static int peer_has_vnet_hdr(VirtIONet *n)
456 {
457 return n->has_vnet_hdr;
458 }
459
460 static int peer_has_ufo(VirtIONet *n)
461 {
462 if (!peer_has_vnet_hdr(n))
463 return 0;
464
465 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
466
467 return n->has_ufo;
468 }
469
470 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
471 int version_1)
472 {
473 int i;
474 NetClientState *nc;
475
476 n->mergeable_rx_bufs = mergeable_rx_bufs;
477
478 if (version_1) {
479 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
480 } else {
481 n->guest_hdr_len = n->mergeable_rx_bufs ?
482 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
483 sizeof(struct virtio_net_hdr);
484 }
485
486 for (i = 0; i < n->max_queues; i++) {
487 nc = qemu_get_subqueue(n->nic, i);
488
489 if (peer_has_vnet_hdr(n) &&
490 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
491 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
492 n->host_hdr_len = n->guest_hdr_len;
493 }
494 }
495 }
496
497 static int peer_attach(VirtIONet *n, int index)
498 {
499 NetClientState *nc = qemu_get_subqueue(n->nic, index);
500
501 if (!nc->peer) {
502 return 0;
503 }
504
505 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
506 vhost_set_vring_enable(nc->peer, 1);
507 }
508
509 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
510 return 0;
511 }
512
513 return tap_enable(nc->peer);
514 }
515
516 static int peer_detach(VirtIONet *n, int index)
517 {
518 NetClientState *nc = qemu_get_subqueue(n->nic, index);
519
520 if (!nc->peer) {
521 return 0;
522 }
523
524 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
525 vhost_set_vring_enable(nc->peer, 0);
526 }
527
528 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
529 return 0;
530 }
531
532 return tap_disable(nc->peer);
533 }
534
535 static void virtio_net_set_queues(VirtIONet *n)
536 {
537 int i;
538 int r;
539
540 if (n->nic->peer_deleted) {
541 return;
542 }
543
544 for (i = 0; i < n->max_queues; i++) {
545 if (i < n->curr_queues) {
546 r = peer_attach(n, i);
547 assert(!r);
548 } else {
549 r = peer_detach(n, i);
550 assert(!r);
551 }
552 }
553 }
554
555 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
556
557 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
558 Error **errp)
559 {
560 VirtIONet *n = VIRTIO_NET(vdev);
561 NetClientState *nc = qemu_get_queue(n->nic);
562
563     /* First, sync all features that virtio-net could possibly support */
564 features |= n->host_features;
565
566 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
567
568 if (!peer_has_vnet_hdr(n)) {
569 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
570 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
571 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
572 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
573
574 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
575 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
576 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
577 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
578 }
579
580 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
581 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
582 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
583 }
584
585 if (!get_vhost_net(nc->peer)) {
586 return features;
587 }
588 return vhost_net_get_features(get_vhost_net(nc->peer), features);
589 }
590
591 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
592 {
593 uint64_t features = 0;
594
595 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
596 * but also these: */
597 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
598 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
599 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
600 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
601 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
602
603 return features;
604 }
605
606 static void virtio_net_apply_guest_offloads(VirtIONet *n)
607 {
608 qemu_set_offload(qemu_get_queue(n->nic)->peer,
609 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
610 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
611 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
612 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
613 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
614 }
615
616 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
617 {
618 static const uint64_t guest_offloads_mask =
619 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
620 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
621 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
622 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
623 (1ULL << VIRTIO_NET_F_GUEST_UFO);
624
625 return guest_offloads_mask & features;
626 }
627
628 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
629 {
630 VirtIODevice *vdev = VIRTIO_DEVICE(n);
631 return virtio_net_guest_offloads_by_features(vdev->guest_features);
632 }
633
634 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
635 {
636 VirtIONet *n = VIRTIO_NET(vdev);
637 int i;
638
639 virtio_net_set_multiqueue(n,
640 virtio_has_feature(features, VIRTIO_NET_F_MQ));
641
642 virtio_net_set_mrg_rx_bufs(n,
643 virtio_has_feature(features,
644 VIRTIO_NET_F_MRG_RXBUF),
645 virtio_has_feature(features,
646 VIRTIO_F_VERSION_1));
647
648 if (n->has_vnet_hdr) {
649 n->curr_guest_offloads =
650 virtio_net_guest_offloads_by_features(features);
651 virtio_net_apply_guest_offloads(n);
652 }
653
654 for (i = 0; i < n->max_queues; i++) {
655 NetClientState *nc = qemu_get_subqueue(n->nic, i);
656
657 if (!get_vhost_net(nc->peer)) {
658 continue;
659 }
660 vhost_net_ack_features(get_vhost_net(nc->peer), features);
661 }
662
663 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
664 memset(n->vlans, 0, MAX_VLAN >> 3);
665 } else {
666 memset(n->vlans, 0xff, MAX_VLAN >> 3);
667 }
668 }
669
670 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
671 struct iovec *iov, unsigned int iov_cnt)
672 {
673 uint8_t on;
674 size_t s;
675 NetClientState *nc = qemu_get_queue(n->nic);
676
677 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
678 if (s != sizeof(on)) {
679 return VIRTIO_NET_ERR;
680 }
681
682 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
683 n->promisc = on;
684 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
685 n->allmulti = on;
686 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
687 n->alluni = on;
688 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
689 n->nomulti = on;
690 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
691 n->nouni = on;
692 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
693 n->nobcast = on;
694 } else {
695 return VIRTIO_NET_ERR;
696 }
697
698 rxfilter_notify(nc);
699
700 return VIRTIO_NET_OK;
701 }
702
703 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
704 struct iovec *iov, unsigned int iov_cnt)
705 {
706 VirtIODevice *vdev = VIRTIO_DEVICE(n);
707 uint64_t offloads;
708 size_t s;
709
710 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
711 return VIRTIO_NET_ERR;
712 }
713
714 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
715 if (s != sizeof(offloads)) {
716 return VIRTIO_NET_ERR;
717 }
718
719 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
720 uint64_t supported_offloads;
721
722 if (!n->has_vnet_hdr) {
723 return VIRTIO_NET_ERR;
724 }
725
726 supported_offloads = virtio_net_supported_guest_offloads(n);
727 if (offloads & ~supported_offloads) {
728 return VIRTIO_NET_ERR;
729 }
730
731 n->curr_guest_offloads = offloads;
732 virtio_net_apply_guest_offloads(n);
733
734 return VIRTIO_NET_OK;
735 } else {
736 return VIRTIO_NET_ERR;
737 }
738 }
739
740 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
741 struct iovec *iov, unsigned int iov_cnt)
742 {
743 VirtIODevice *vdev = VIRTIO_DEVICE(n);
744 struct virtio_net_ctrl_mac mac_data;
745 size_t s;
746 NetClientState *nc = qemu_get_queue(n->nic);
747
748 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
749 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
750 return VIRTIO_NET_ERR;
751 }
752 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
753 assert(s == sizeof(n->mac));
754 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
755 rxfilter_notify(nc);
756
757 return VIRTIO_NET_OK;
758 }
759
760 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
761 return VIRTIO_NET_ERR;
762 }
763
764 int in_use = 0;
765 int first_multi = 0;
766 uint8_t uni_overflow = 0;
767 uint8_t multi_overflow = 0;
768 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
769
770 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
771 sizeof(mac_data.entries));
772 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
773 if (s != sizeof(mac_data.entries)) {
774 goto error;
775 }
776 iov_discard_front(&iov, &iov_cnt, s);
777
778 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
779 goto error;
780 }
781
782 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
783 s = iov_to_buf(iov, iov_cnt, 0, macs,
784 mac_data.entries * ETH_ALEN);
785 if (s != mac_data.entries * ETH_ALEN) {
786 goto error;
787 }
788 in_use += mac_data.entries;
789 } else {
790 uni_overflow = 1;
791 }
792
793 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
794
795 first_multi = in_use;
796
797 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
798 sizeof(mac_data.entries));
799 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
800 if (s != sizeof(mac_data.entries)) {
801 goto error;
802 }
803
804 iov_discard_front(&iov, &iov_cnt, s);
805
806 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
807 goto error;
808 }
809
810 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
811 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
812 mac_data.entries * ETH_ALEN);
813 if (s != mac_data.entries * ETH_ALEN) {
814 goto error;
815 }
816 in_use += mac_data.entries;
817 } else {
818 multi_overflow = 1;
819 }
820
821 n->mac_table.in_use = in_use;
822 n->mac_table.first_multi = first_multi;
823 n->mac_table.uni_overflow = uni_overflow;
824 n->mac_table.multi_overflow = multi_overflow;
825 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
826 g_free(macs);
827 rxfilter_notify(nc);
828
829 return VIRTIO_NET_OK;
830
831 error:
832 g_free(macs);
833 return VIRTIO_NET_ERR;
834 }
835
836 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
837 struct iovec *iov, unsigned int iov_cnt)
838 {
839 VirtIODevice *vdev = VIRTIO_DEVICE(n);
840 uint16_t vid;
841 size_t s;
842 NetClientState *nc = qemu_get_queue(n->nic);
843
844 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
845 vid = virtio_lduw_p(vdev, &vid);
846 if (s != sizeof(vid)) {
847 return VIRTIO_NET_ERR;
848 }
849
850 if (vid >= MAX_VLAN)
851 return VIRTIO_NET_ERR;
852
853 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
854 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
855 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
856 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
857 else
858 return VIRTIO_NET_ERR;
859
860 rxfilter_notify(nc);
861
862 return VIRTIO_NET_OK;
863 }
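
/*
 * A minimal sketch of the VLAN filter bitmap manipulated above (the demo_
 * name is illustrative): n->vlans holds one bit per VLAN id, packed into
 * 32-bit words, so id 'vid' lives in word vid >> 5 at bit vid & 0x1f.
 */
static inline bool demo_vlan_is_allowed(const uint32_t *vlans, uint16_t vid)
{
    return (vlans[vid >> 5] & (1U << (vid & 0x1f))) != 0;
}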
864
865 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
866 struct iovec *iov, unsigned int iov_cnt)
867 {
868 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
869 n->status & VIRTIO_NET_S_ANNOUNCE) {
870 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
871 if (n->announce_counter) {
872 timer_mod(n->announce_timer,
873 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
874 self_announce_delay(n->announce_counter));
875 }
876 return VIRTIO_NET_OK;
877 } else {
878 return VIRTIO_NET_ERR;
879 }
880 }
881
882 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
883 struct iovec *iov, unsigned int iov_cnt)
884 {
885 VirtIODevice *vdev = VIRTIO_DEVICE(n);
886 struct virtio_net_ctrl_mq mq;
887 size_t s;
888 uint16_t queues;
889
890 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
891 if (s != sizeof(mq)) {
892 return VIRTIO_NET_ERR;
893 }
894
895 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
896 return VIRTIO_NET_ERR;
897 }
898
899 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
900
901 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
902 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
903 queues > n->max_queues ||
904 !n->multiqueue) {
905 return VIRTIO_NET_ERR;
906 }
907
908 n->curr_queues = queues;
909 /* stop the backend before changing the number of queues to avoid handling a
910 * disabled queue */
911 virtio_net_set_status(vdev, vdev->status);
912 virtio_net_set_queues(n);
913
914 return VIRTIO_NET_OK;
915 }
916
917 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
918 {
919 VirtIONet *n = VIRTIO_NET(vdev);
920 struct virtio_net_ctrl_hdr ctrl;
921 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
922 VirtQueueElement *elem;
923 size_t s;
924 struct iovec *iov, *iov2;
925 unsigned int iov_cnt;
926
927 for (;;) {
928 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
929 if (!elem) {
930 break;
931 }
932 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
933 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
934 virtio_error(vdev, "virtio-net ctrl missing headers");
935 virtqueue_detach_element(vq, elem, 0);
936 g_free(elem);
937 break;
938 }
939
940 iov_cnt = elem->out_num;
941 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
942 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
943 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
944 if (s != sizeof(ctrl)) {
945 status = VIRTIO_NET_ERR;
946 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
947 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
948 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
949 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
950 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
951 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
952 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
953 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
954 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
955 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
956 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
957 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
958 }
959
960 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
961 assert(s == sizeof(status));
962
963 virtqueue_push(vq, elem, sizeof(status));
964 virtio_notify(vdev, vq);
965 g_free(iov2);
966 g_free(elem);
967 }
968 }
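
/*
 * A minimal sketch of the control-queue wire format parsed above, seen from
 * the driver side (the demo_ struct is illustrative; field meanings follow
 * the virtio spec): the driver-readable part of the descriptor chain carries
 * a virtio_net_ctrl_hdr plus a command-specific payload, and a separate
 * device-writable buffer receives the one-byte virtio_net_ctrl_ack status.
 */
struct demo_ctrl_rx_promisc_cmd {
    struct virtio_net_ctrl_hdr hdr; /* .class = VIRTIO_NET_CTRL_RX,
                                       .cmd = VIRTIO_NET_CTRL_RX_PROMISC */
    uint8_t on;                     /* payload read by
                                       virtio_net_handle_rx_mode() */
};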
969
970 /* RX */
971
972 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
973 {
974 VirtIONet *n = VIRTIO_NET(vdev);
975 int queue_index = vq2q(virtio_get_queue_index(vq));
976
977 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
978 }
979
980 static int virtio_net_can_receive(NetClientState *nc)
981 {
982 VirtIONet *n = qemu_get_nic_opaque(nc);
983 VirtIODevice *vdev = VIRTIO_DEVICE(n);
984 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
985
986 if (!vdev->vm_running) {
987 return 0;
988 }
989
990 if (nc->queue_index >= n->curr_queues) {
991 return 0;
992 }
993
994 if (!virtio_queue_ready(q->rx_vq) ||
995 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
996 return 0;
997 }
998
999 return 1;
1000 }
1001
1002 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1003 {
1004 VirtIONet *n = q->n;
1005 if (virtio_queue_empty(q->rx_vq) ||
1006 (n->mergeable_rx_bufs &&
1007 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1008 virtio_queue_set_notification(q->rx_vq, 1);
1009
1010 /* To avoid a race condition where the guest has made some buffers
1011 * available after the above check but before notification was
1012 * enabled, check for available buffers again.
1013 */
1014 if (virtio_queue_empty(q->rx_vq) ||
1015 (n->mergeable_rx_bufs &&
1016 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1017 return 0;
1018 }
1019 }
1020
1021 virtio_queue_set_notification(q->rx_vq, 0);
1022 return 1;
1023 }
1024
1025 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1026 {
1027 virtio_tswap16s(vdev, &hdr->hdr_len);
1028 virtio_tswap16s(vdev, &hdr->gso_size);
1029 virtio_tswap16s(vdev, &hdr->csum_start);
1030 virtio_tswap16s(vdev, &hdr->csum_offset);
1031 }
1032
1033 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1034 * it never finds out that the packets don't have valid checksums. This
1035 * causes dhclient to get upset. Fedora's carried a patch for ages to
1036 * fix this with Xen but it hasn't appeared in an upstream release of
1037 * dhclient yet.
1038 *
1039 * To avoid breaking existing guests, we catch udp packets and add
1040 * checksums. This is terrible but it's better than hacking the guest
1041 * kernels.
1042 *
1043 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1044 * we should provide a mechanism to disable it to avoid polluting the host
1045 * cache.
1046 */
1047 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1048 uint8_t *buf, size_t size)
1049 {
1050 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1051 (size > 27 && size < 1500) && /* normal sized MTU */
1052 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1053 (buf[23] == 17) && /* ip.protocol == UDP */
1054 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1055 net_checksum_calculate(buf, size);
1056 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1057 }
1058 }
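
/*
 * The magic numbers above index into the raw Ethernet frame (the vnet header
 * has already been skipped by the caller); a reference map, assuming no VLAN
 * tag and a 20-byte IPv4 header (demo_ names are illustrative):
 */
enum {
    DEMO_ETHERTYPE_OFF = 12, /* 0x0800 == IPv4 (buf[12], buf[13]) */
    DEMO_IP_PROTO_OFF  = 23, /* 17 == UDP */
    DEMO_UDP_SPORT_OFF = 34, /* 67 == bootps, i.e. a DHCP server reply */
};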
1059
1060 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1061 const void *buf, size_t size)
1062 {
1063 if (n->has_vnet_hdr) {
1064 /* FIXME this cast is evil */
1065 void *wbuf = (void *)buf;
1066 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1067 size - n->host_hdr_len);
1068
1069 if (n->needs_vnet_hdr_swap) {
1070 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1071 }
1072 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1073 } else {
1074 struct virtio_net_hdr hdr = {
1075 .flags = 0,
1076 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1077 };
1078 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1079 }
1080 }
1081
1082 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1083 {
1084 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1085 static const uint8_t vlan[] = {0x81, 0x00};
1086 uint8_t *ptr = (uint8_t *)buf;
1087 int i;
1088
1089 if (n->promisc)
1090 return 1;
1091
1092 ptr += n->host_hdr_len;
1093
1094 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1095 int vid = lduw_be_p(ptr + 14) & 0xfff;
1096 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1097 return 0;
1098 }
1099
1100 if (ptr[0] & 1) { // multicast
1101 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1102 return !n->nobcast;
1103 } else if (n->nomulti) {
1104 return 0;
1105 } else if (n->allmulti || n->mac_table.multi_overflow) {
1106 return 1;
1107 }
1108
1109 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1110 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1111 return 1;
1112 }
1113 }
1114 } else { // unicast
1115 if (n->nouni) {
1116 return 0;
1117 } else if (n->alluni || n->mac_table.uni_overflow) {
1118 return 1;
1119 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1120 return 1;
1121 }
1122
1123 for (i = 0; i < n->mac_table.first_multi; i++) {
1124 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1125 return 1;
1126 }
1127 }
1128 }
1129
1130 return 0;
1131 }
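
/*
 * A minimal sketch of the MAC table layout walked above (the demo_ name is
 * illustrative): macs[] holds in_use entries of ETH_ALEN bytes each, with
 * unicast entries in [0, first_multi) and multicast entries in
 * [first_multi, in_use), as built by virtio_net_handle_mac().
 */
static inline const uint8_t *demo_mac_table_entry(const VirtIONet *n, int i)
{
    return &n->mac_table.macs[i * ETH_ALEN];
}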
1132
1133 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1134 size_t size)
1135 {
1136 VirtIONet *n = qemu_get_nic_opaque(nc);
1137 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1138 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1139 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1140 struct virtio_net_hdr_mrg_rxbuf mhdr;
1141 unsigned mhdr_cnt = 0;
1142 size_t offset, i, guest_offset;
1143
1144 if (!virtio_net_can_receive(nc)) {
1145 return -1;
1146 }
1147
1148 /* hdr_len refers to the header we supply to the guest */
1149 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1150 return 0;
1151 }
1152
1153 if (!receive_filter(n, buf, size))
1154 return size;
1155
1156 offset = i = 0;
1157
1158 while (offset < size) {
1159 VirtQueueElement *elem;
1160 int len, total;
1161 const struct iovec *sg;
1162
1163 total = 0;
1164
1165 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1166 if (!elem) {
1167 if (i) {
1168 virtio_error(vdev, "virtio-net unexpected empty queue: "
1169 "i %zd mergeable %d offset %zd, size %zd, "
1170 "guest hdr len %zd, host hdr len %zd "
1171 "guest features 0x%" PRIx64,
1172 i, n->mergeable_rx_bufs, offset, size,
1173 n->guest_hdr_len, n->host_hdr_len,
1174 vdev->guest_features);
1175 }
1176 return -1;
1177 }
1178
1179 if (elem->in_num < 1) {
1180 virtio_error(vdev,
1181 "virtio-net receive queue contains no in buffers");
1182 virtqueue_detach_element(q->rx_vq, elem, 0);
1183 g_free(elem);
1184 return -1;
1185 }
1186
1187 sg = elem->in_sg;
1188 if (i == 0) {
1189 assert(offset == 0);
1190 if (n->mergeable_rx_bufs) {
1191 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1192 sg, elem->in_num,
1193 offsetof(typeof(mhdr), num_buffers),
1194 sizeof(mhdr.num_buffers));
1195 }
1196
1197 receive_header(n, sg, elem->in_num, buf, size);
1198 offset = n->host_hdr_len;
1199 total += n->guest_hdr_len;
1200 guest_offset = n->guest_hdr_len;
1201 } else {
1202 guest_offset = 0;
1203 }
1204
1205 /* copy in packet. ugh */
1206 len = iov_from_buf(sg, elem->in_num, guest_offset,
1207 buf + offset, size - offset);
1208 total += len;
1209 offset += len;
1210 /* If buffers can't be merged, at this point we
1211 * must have consumed the complete packet.
1212 * Otherwise, drop it. */
1213 if (!n->mergeable_rx_bufs && offset < size) {
1214 virtqueue_unpop(q->rx_vq, elem, total);
1215 g_free(elem);
1216 return size;
1217 }
1218
1219 /* signal other side */
1220 virtqueue_fill(q->rx_vq, elem, total, i++);
1221 g_free(elem);
1222 }
1223
1224 if (mhdr_cnt) {
1225 virtio_stw_p(vdev, &mhdr.num_buffers, i);
1226 iov_from_buf(mhdr_sg, mhdr_cnt,
1227 0,
1228 &mhdr.num_buffers, sizeof mhdr.num_buffers);
1229 }
1230
1231 virtqueue_flush(q->rx_vq, i);
1232 virtio_notify(vdev, q->rx_vq);
1233
1234 return size;
1235 }
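
/*
 * For reference, the header the guest sees on each received packet when
 * mergeable RX buffers are negotiated (layout per the virtio spec; the
 * demo_ name is illustrative). num_buffers is what the mhdr_sg/mhdr_cnt
 * machinery above patches in after the copy loop, once the number of
 * descriptor chains consumed is known.
 */
struct demo_mrg_rx_hdr {
    struct virtio_net_hdr hdr; /* 10 bytes: flags, gso_type, hdr_len,
                                  gso_size, csum_start, csum_offset */
    uint16_t num_buffers;      /* chains used by this packet; 12 bytes
                                  of header in total */
};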
1236
1237 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1238 size_t size)
1239 {
1240 ssize_t r;
1241
1242 rcu_read_lock();
1243 r = virtio_net_receive_rcu(nc, buf, size);
1244 rcu_read_unlock();
1245 return r;
1246 }
1247
1248 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1249
1250 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1251 {
1252 VirtIONet *n = qemu_get_nic_opaque(nc);
1253 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1254 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1255
1256 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1257 virtio_notify(vdev, q->tx_vq);
1258
1259 g_free(q->async_tx.elem);
1260 q->async_tx.elem = NULL;
1261
1262 virtio_queue_set_notification(q->tx_vq, 1);
1263 virtio_net_flush_tx(q);
1264 }
1265
1266 /* TX */
1267 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
1268 {
1269 VirtIONet *n = q->n;
1270 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1271 VirtQueueElement *elem;
1272 int32_t num_packets = 0;
1273 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
1274 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1275 return num_packets;
1276 }
1277
1278 if (q->async_tx.elem) {
1279 virtio_queue_set_notification(q->tx_vq, 0);
1280 return num_packets;
1281 }
1282
1283 for (;;) {
1284 ssize_t ret;
1285 unsigned int out_num;
1286 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
1287 struct virtio_net_hdr_mrg_rxbuf mhdr;
1288
1289 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
1290 if (!elem) {
1291 break;
1292 }
1293
1294 out_num = elem->out_num;
1295 out_sg = elem->out_sg;
1296 if (out_num < 1) {
1297 virtio_error(vdev, "virtio-net header not in first element");
1298 virtqueue_detach_element(q->tx_vq, elem, 0);
1299 g_free(elem);
1300 return -EINVAL;
1301 }
1302
1303 if (n->has_vnet_hdr) {
1304 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
1305 n->guest_hdr_len) {
1306 virtio_error(vdev, "virtio-net header incorrect");
1307 virtqueue_detach_element(q->tx_vq, elem, 0);
1308 g_free(elem);
1309 return -EINVAL;
1310 }
1311 if (n->needs_vnet_hdr_swap) {
1312 virtio_net_hdr_swap(vdev, (void *) &mhdr);
1313 sg2[0].iov_base = &mhdr;
1314 sg2[0].iov_len = n->guest_hdr_len;
1315 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
1316 out_sg, out_num,
1317 n->guest_hdr_len, -1);
1318 if (out_num == VIRTQUEUE_MAX_SIZE) {
1319 goto drop;
1320 }
1321 out_num += 1;
1322 out_sg = sg2;
1323 }
1324 }
1325 /*
1326 * If host wants to see the guest header as is, we can
1327 * pass it on unchanged. Otherwise, copy just the parts
1328 * that host is interested in.
1329 */
1330 assert(n->host_hdr_len <= n->guest_hdr_len);
1331 if (n->host_hdr_len != n->guest_hdr_len) {
1332 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
1333 out_sg, out_num,
1334 0, n->host_hdr_len);
1335 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
1336 out_sg, out_num,
1337 n->guest_hdr_len, -1);
1338 out_num = sg_num;
1339 out_sg = sg;
1340 }
1341
1342 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
1343 out_sg, out_num, virtio_net_tx_complete);
1344 if (ret == 0) {
1345 virtio_queue_set_notification(q->tx_vq, 0);
1346 q->async_tx.elem = elem;
1347 return -EBUSY;
1348 }
1349
1350 drop:
1351 virtqueue_push(q->tx_vq, elem, 0);
1352 virtio_notify(vdev, q->tx_vq);
1353 g_free(elem);
1354
1355 if (++num_packets >= n->tx_burst) {
1356 break;
1357 }
1358 }
1359 return num_packets;
1360 }
1361
1362 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
1363 {
1364 VirtIONet *n = VIRTIO_NET(vdev);
1365 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1366
1367 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1368 virtio_net_drop_tx_queue_data(vdev, vq);
1369 return;
1370 }
1371
1372 /* This happens when device was stopped but VCPU wasn't. */
1373 if (!vdev->vm_running) {
1374 q->tx_waiting = 1;
1375 return;
1376 }
1377
1378 if (q->tx_waiting) {
1379 virtio_queue_set_notification(vq, 1);
1380 timer_del(q->tx_timer);
1381 q->tx_waiting = 0;
1382 if (virtio_net_flush_tx(q) == -EINVAL) {
1383 return;
1384 }
1385 } else {
1386 timer_mod(q->tx_timer,
1387 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
1388 q->tx_waiting = 1;
1389 virtio_queue_set_notification(vq, 0);
1390 }
1391 }
1392
1393 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
1394 {
1395 VirtIONet *n = VIRTIO_NET(vdev);
1396 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1397
1398 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1399 virtio_net_drop_tx_queue_data(vdev, vq);
1400 return;
1401 }
1402
1403 if (unlikely(q->tx_waiting)) {
1404 return;
1405 }
1406 q->tx_waiting = 1;
1407 /* This happens when device was stopped but VCPU wasn't. */
1408 if (!vdev->vm_running) {
1409 return;
1410 }
1411 virtio_queue_set_notification(vq, 0);
1412 qemu_bh_schedule(q->tx_bh);
1413 }
1414
1415 static void virtio_net_tx_timer(void *opaque)
1416 {
1417 VirtIONetQueue *q = opaque;
1418 VirtIONet *n = q->n;
1419 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1420 /* This happens when device was stopped but BH wasn't. */
1421 if (!vdev->vm_running) {
1422 /* Make sure tx waiting is set, so we'll run when restarted. */
1423 assert(q->tx_waiting);
1424 return;
1425 }
1426
1427 q->tx_waiting = 0;
1428
1429     /* Just in case the driver is not ready any more */
1430 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1431 return;
1432 }
1433
1434 virtio_queue_set_notification(q->tx_vq, 1);
1435 virtio_net_flush_tx(q);
1436 }
1437
1438 static void virtio_net_tx_bh(void *opaque)
1439 {
1440 VirtIONetQueue *q = opaque;
1441 VirtIONet *n = q->n;
1442 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1443 int32_t ret;
1444
1445 /* This happens when device was stopped but BH wasn't. */
1446 if (!vdev->vm_running) {
1447 /* Make sure tx waiting is set, so we'll run when restarted. */
1448 assert(q->tx_waiting);
1449 return;
1450 }
1451
1452 q->tx_waiting = 0;
1453
1454     /* Just in case the driver is not ready any more */
1455 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
1456 return;
1457 }
1458
1459 ret = virtio_net_flush_tx(q);
1460 if (ret == -EBUSY || ret == -EINVAL) {
1461 return; /* Notification re-enable handled by tx_complete or device
1462 * broken */
1463 }
1464
1465 /* If we flush a full burst of packets, assume there are
1466 * more coming and immediately reschedule */
1467 if (ret >= n->tx_burst) {
1468 qemu_bh_schedule(q->tx_bh);
1469 q->tx_waiting = 1;
1470 return;
1471 }
1472
1473 /* If less than a full burst, re-enable notification and flush
1474 * anything that may have come in while we weren't looking. If
1475 * we find something, assume the guest is still active and reschedule */
1476 virtio_queue_set_notification(q->tx_vq, 1);
1477 ret = virtio_net_flush_tx(q);
1478 if (ret == -EINVAL) {
1479 return;
1480 } else if (ret > 0) {
1481 virtio_queue_set_notification(q->tx_vq, 0);
1482 qemu_bh_schedule(q->tx_bh);
1483 q->tx_waiting = 1;
1484 }
1485 }
1486
1487 static void virtio_net_add_queue(VirtIONet *n, int index)
1488 {
1489 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1490
1491 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
1492 virtio_net_handle_rx);
1493 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
1494 n->vqs[index].tx_vq =
1495 virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
1496 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1497 virtio_net_tx_timer,
1498 &n->vqs[index]);
1499 } else {
1500 n->vqs[index].tx_vq =
1501 virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
1502 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
1503 }
1504
1505 n->vqs[index].tx_waiting = 0;
1506 n->vqs[index].n = n;
1507 }
1508
1509 static void virtio_net_del_queue(VirtIONet *n, int index)
1510 {
1511 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1512 VirtIONetQueue *q = &n->vqs[index];
1513 NetClientState *nc = qemu_get_subqueue(n->nic, index);
1514
1515 qemu_purge_queued_packets(nc);
1516
1517 virtio_del_queue(vdev, index * 2);
1518 if (q->tx_timer) {
1519 timer_del(q->tx_timer);
1520 timer_free(q->tx_timer);
1521 } else {
1522 qemu_bh_delete(q->tx_bh);
1523 }
1524 virtio_del_queue(vdev, index * 2 + 1);
1525 }
1526
1527 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
1528 {
1529 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1530 int old_num_queues = virtio_get_num_queues(vdev);
1531 int new_num_queues = new_max_queues * 2 + 1;
1532 int i;
1533
1534 assert(old_num_queues >= 3);
1535 assert(old_num_queues % 2 == 1);
1536
1537 if (old_num_queues == new_num_queues) {
1538 return;
1539 }
1540
1541 /*
1542 * We always need to remove and add ctrl vq if
1543 * old_num_queues != new_num_queues. Remove ctrl_vq first,
1544      * and then we only enter one of the following two loops.
1545 */
1546 virtio_del_queue(vdev, old_num_queues - 1);
1547
1548 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
1549 /* new_num_queues < old_num_queues */
1550 virtio_net_del_queue(n, i / 2);
1551 }
1552
1553 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
1554 /* new_num_queues > old_num_queues */
1555 virtio_net_add_queue(n, i / 2);
1556 }
1557
1558 /* add ctrl_vq last */
1559 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1560 }
1561
1562 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
1563 {
1564 int max = multiqueue ? n->max_queues : 1;
1565
1566 n->multiqueue = multiqueue;
1567 virtio_net_change_num_queues(n, max);
1568
1569 virtio_net_set_queues(n);
1570 }
1571
1572 static int virtio_net_post_load_device(void *opaque, int version_id)
1573 {
1574 VirtIONet *n = opaque;
1575 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1576 int i, link_down;
1577
1578 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
1579 virtio_vdev_has_feature(vdev,
1580 VIRTIO_F_VERSION_1));
1581
1582 /* MAC_TABLE_ENTRIES may be different from the saved image */
1583 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
1584 n->mac_table.in_use = 0;
1585 }
1586
1587 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
1588 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
1589 }
1590
1591 if (peer_has_vnet_hdr(n)) {
1592 virtio_net_apply_guest_offloads(n);
1593 }
1594
1595 virtio_net_set_queues(n);
1596
1597 /* Find the first multicast entry in the saved MAC filter */
1598 for (i = 0; i < n->mac_table.in_use; i++) {
1599 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
1600 break;
1601 }
1602 }
1603 n->mac_table.first_multi = i;
1604
1605 /* nc.link_down can't be migrated, so infer link_down according
1606 * to link status bit in n->status */
1607 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
1608 for (i = 0; i < n->max_queues; i++) {
1609 qemu_get_subqueue(n->nic, i)->link_down = link_down;
1610 }
1611
1612 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
1613 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
1614 n->announce_counter = SELF_ANNOUNCE_ROUNDS;
1615 timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
1616 }
1617
1618 return 0;
1619 }
1620
1621 /* tx_waiting field of a VirtIONetQueue */
1622 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
1623 .name = "virtio-net-queue-tx_waiting",
1624 .fields = (VMStateField[]) {
1625 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
1626 VMSTATE_END_OF_LIST()
1627 },
1628 };
1629
1630 static bool max_queues_gt_1(void *opaque, int version_id)
1631 {
1632 return VIRTIO_NET(opaque)->max_queues > 1;
1633 }
1634
1635 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
1636 {
1637 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
1638 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
1639 }
1640
1641 static bool mac_table_fits(void *opaque, int version_id)
1642 {
1643 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
1644 }
1645
1646 static bool mac_table_doesnt_fit(void *opaque, int version_id)
1647 {
1648 return !mac_table_fits(opaque, version_id);
1649 }
1650
1651 /* This temporary type is shared by all the WITH_TMP methods
1652 * although only some fields are used by each.
1653 */
1654 struct VirtIONetMigTmp {
1655 VirtIONet *parent;
1656 VirtIONetQueue *vqs_1;
1657 uint16_t curr_queues_1;
1658 uint8_t has_ufo;
1659 uint32_t has_vnet_hdr;
1660 };
1661
1662 /* The 2nd and subsequent tx_waiting flags are loaded later than
1663 * the 1st entry in the queues and only if there's more than one
1664 * entry. We use the tmp mechanism to calculate a temporary
1665 * pointer and count and also validate the count.
1666 */
1667
1668 static void virtio_net_tx_waiting_pre_save(void *opaque)
1669 {
1670 struct VirtIONetMigTmp *tmp = opaque;
1671
1672 tmp->vqs_1 = tmp->parent->vqs + 1;
1673 tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
1674 if (tmp->parent->curr_queues == 0) {
1675 tmp->curr_queues_1 = 0;
1676 }
1677 }
1678
1679 static int virtio_net_tx_waiting_pre_load(void *opaque)
1680 {
1681 struct VirtIONetMigTmp *tmp = opaque;
1682
1683 /* Reuse the pointer setup from save */
1684 virtio_net_tx_waiting_pre_save(opaque);
1685
1686 if (tmp->parent->curr_queues > tmp->parent->max_queues) {
1687 error_report("virtio-net: curr_queues %x > max_queues %x",
1688 tmp->parent->curr_queues, tmp->parent->max_queues);
1689
1690 return -EINVAL;
1691 }
1692
1693 return 0; /* all good */
1694 }
1695
1696 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
1697 .name = "virtio-net-tx_waiting",
1698 .pre_load = virtio_net_tx_waiting_pre_load,
1699 .pre_save = virtio_net_tx_waiting_pre_save,
1700 .fields = (VMStateField[]) {
1701 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
1702 curr_queues_1,
1703 vmstate_virtio_net_queue_tx_waiting,
1704 struct VirtIONetQueue),
1705 VMSTATE_END_OF_LIST()
1706 },
1707 };
1708
1709 /* the 'has_ufo' flag is just tested; if the incoming stream has the
1710 * flag set we need to check that we have it
1711 */
1712 static int virtio_net_ufo_post_load(void *opaque, int version_id)
1713 {
1714 struct VirtIONetMigTmp *tmp = opaque;
1715
1716 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
1717 error_report("virtio-net: saved image requires TUN_F_UFO support");
1718 return -EINVAL;
1719 }
1720
1721 return 0;
1722 }
1723
1724 static void virtio_net_ufo_pre_save(void *opaque)
1725 {
1726 struct VirtIONetMigTmp *tmp = opaque;
1727
1728 tmp->has_ufo = tmp->parent->has_ufo;
1729 }
1730
1731 static const VMStateDescription vmstate_virtio_net_has_ufo = {
1732 .name = "virtio-net-ufo",
1733 .post_load = virtio_net_ufo_post_load,
1734 .pre_save = virtio_net_ufo_pre_save,
1735 .fields = (VMStateField[]) {
1736 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
1737 VMSTATE_END_OF_LIST()
1738 },
1739 };
1740
1741 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
1742 * flag set we need to check that we have it
1743 */
1744 static int virtio_net_vnet_post_load(void *opaque, int version_id)
1745 {
1746 struct VirtIONetMigTmp *tmp = opaque;
1747
1748 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
1749 error_report("virtio-net: saved image requires vnet_hdr=on");
1750 return -EINVAL;
1751 }
1752
1753 return 0;
1754 }
1755
1756 static void virtio_net_vnet_pre_save(void *opaque)
1757 {
1758 struct VirtIONetMigTmp *tmp = opaque;
1759
1760 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
1761 }
1762
1763 static const VMStateDescription vmstate_virtio_net_has_vnet = {
1764 .name = "virtio-net-vnet",
1765 .post_load = virtio_net_vnet_post_load,
1766 .pre_save = virtio_net_vnet_pre_save,
1767 .fields = (VMStateField[]) {
1768 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
1769 VMSTATE_END_OF_LIST()
1770 },
1771 };
1772
1773 static const VMStateDescription vmstate_virtio_net_device = {
1774 .name = "virtio-net-device",
1775 .version_id = VIRTIO_NET_VM_VERSION,
1776 .minimum_version_id = VIRTIO_NET_VM_VERSION,
1777 .post_load = virtio_net_post_load_device,
1778 .fields = (VMStateField[]) {
1779 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
1780 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
1781 vmstate_virtio_net_queue_tx_waiting,
1782 VirtIONetQueue),
1783 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
1784 VMSTATE_UINT16(status, VirtIONet),
1785 VMSTATE_UINT8(promisc, VirtIONet),
1786 VMSTATE_UINT8(allmulti, VirtIONet),
1787 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
1788
1789 /* Guarded pair: If it fits we load it, else we throw it away
1790          * - can happen if the source has a larger MAC table; post-load
1791 * sets flags in this case.
1792 */
1793 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
1794 0, mac_table_fits, mac_table.in_use,
1795 ETH_ALEN),
1796 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
1797 mac_table.in_use, ETH_ALEN),
1798
1799 /* Note: This is an array of uint32's that's always been saved as a
1800 * buffer; hold onto your endiannesses; it's actually used as a bitmap
1801 * but based on the uint.
1802 */
1803 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
1804 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1805 vmstate_virtio_net_has_vnet),
1806 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
1807 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
1808 VMSTATE_UINT8(alluni, VirtIONet),
1809 VMSTATE_UINT8(nomulti, VirtIONet),
1810 VMSTATE_UINT8(nouni, VirtIONet),
1811 VMSTATE_UINT8(nobcast, VirtIONet),
1812 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1813 vmstate_virtio_net_has_ufo),
1814 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
1815 vmstate_info_uint16_equal, uint16_t),
1816 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
1817 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1818 vmstate_virtio_net_tx_waiting),
1819 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
1820 has_ctrl_guest_offloads),
1821 VMSTATE_END_OF_LIST()
1822 },
1823 };
1824
1825 static NetClientInfo net_virtio_info = {
1826 .type = NET_CLIENT_DRIVER_NIC,
1827 .size = sizeof(NICState),
1828 .can_receive = virtio_net_can_receive,
1829 .receive = virtio_net_receive,
1830 .link_status_changed = virtio_net_set_link_status,
1831 .query_rx_filter = virtio_net_query_rxfilter,
1832 };
1833
1834 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
1835 {
1836 VirtIONet *n = VIRTIO_NET(vdev);
1837 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1838 assert(n->vhost_started);
1839 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
1840 }
1841
1842 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
1843 bool mask)
1844 {
1845 VirtIONet *n = VIRTIO_NET(vdev);
1846 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1847 assert(n->vhost_started);
1848 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
1849 vdev, idx, mask);
1850 }
1851
1852 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
1853 {
1854 int i, config_size = 0;
1855 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
1856
1857 for (i = 0; feature_sizes[i].flags != 0; i++) {
1858 if (host_features & feature_sizes[i].flags) {
1859 config_size = MAX(feature_sizes[i].end, config_size);
1860 }
1861 }
1862 n->config_size = config_size;
1863 }
1864
1865 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
1866 const char *type)
1867 {
1868 /*
1869      * The name can be NULL; in that case the netclient name will be of the form type.x.
1870 */
1871 assert(type != NULL);
1872
1873 g_free(n->netclient_name);
1874 g_free(n->netclient_type);
1875 n->netclient_name = g_strdup(name);
1876 n->netclient_type = g_strdup(type);
1877 }
1878
1879 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
1880 {
1881 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1882 VirtIONet *n = VIRTIO_NET(dev);
1883 NetClientState *nc;
1884 int i;
1885
1886 if (n->net_conf.mtu) {
1887 n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
1888 }
1889
1890 virtio_net_set_config_size(n, n->host_features);
1891 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
1892
1893 /*
1894 * We set a lower limit on RX queue size to what it always was.
1895 * Guests that want a smaller ring can always resize it without
1896 * help from us (using virtio 1 and up).
1897 */
1898 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
1899 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
1900 (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
1901 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
1902 "must be a power of 2 between %d and %d.",
1903 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
1904 VIRTQUEUE_MAX_SIZE);
1905 virtio_cleanup(vdev);
1906 return;
1907 }
1908
1909 n->max_queues = MAX(n->nic_conf.peers.queues, 1);
1910 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
1911 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
1912 "must be a positive integer less than %d.",
1913 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
1914 virtio_cleanup(vdev);
1915 return;
1916 }
1917 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
1918 n->curr_queues = 1;
1919 n->tx_timeout = n->net_conf.txtimer;
1920
1921 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
1922 && strcmp(n->net_conf.tx, "bh")) {
1923 error_report("virtio-net: "
1924 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
1925 n->net_conf.tx);
1926 error_report("Defaulting to \"bh\"");
1927 }
1928
1929 for (i = 0; i < n->max_queues; i++) {
1930 virtio_net_add_queue(n, i);
1931 }
1932
1933 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1934 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
1935 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
1936 n->status = VIRTIO_NET_S_LINK_UP;
1937 n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
1938 virtio_net_announce_timer, n);
1939
1940 if (n->netclient_type) {
1941 /*
1942          * This happens when virtio_net_set_netclient_name has been called.
1943 */
1944 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
1945 n->netclient_type, n->netclient_name, n);
1946 } else {
1947 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
1948 object_get_typename(OBJECT(dev)), dev->id, n);
1949 }
1950
1951 peer_test_vnet_hdr(n);
1952 if (peer_has_vnet_hdr(n)) {
1953 for (i = 0; i < n->max_queues; i++) {
1954 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
1955 }
1956 n->host_hdr_len = sizeof(struct virtio_net_hdr);
1957 } else {
1958 n->host_hdr_len = 0;
1959 }
1960
1961 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
1962
1963 n->vqs[0].tx_waiting = 0;
1964 n->tx_burst = n->net_conf.txburst;
1965 virtio_net_set_mrg_rx_bufs(n, 0, 0);
1966 n->promisc = 1; /* for compatibility */
1967
1968 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
1969
1970 n->vlans = g_malloc0(MAX_VLAN >> 3);
1971
1972 nc = qemu_get_queue(n->nic);
1973 nc->rxfilter_notify_enabled = 1;
1974
1975 n->qdev = dev;
1976 }
1977
1978 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
1979 {
1980 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1981 VirtIONet *n = VIRTIO_NET(dev);
1982 int i, max_queues;
1983
1984 /* This will stop vhost backend if appropriate. */
1985 virtio_net_set_status(vdev, 0);
1986
1987 g_free(n->netclient_name);
1988 n->netclient_name = NULL;
1989 g_free(n->netclient_type);
1990 n->netclient_type = NULL;
1991
1992 g_free(n->mac_table.macs);
1993 g_free(n->vlans);
1994
1995 max_queues = n->multiqueue ? n->max_queues : 1;
1996 for (i = 0; i < max_queues; i++) {
1997 virtio_net_del_queue(n, i);
1998 }
1999
2000 timer_del(n->announce_timer);
2001 timer_free(n->announce_timer);
2002 g_free(n->vqs);
2003 qemu_del_nic(n->nic);
2004 virtio_cleanup(vdev);
2005 }
2006
2007 static void virtio_net_instance_init(Object *obj)
2008 {
2009 VirtIONet *n = VIRTIO_NET(obj);
2010
2011 /*
2012 * The default config_size is sizeof(struct virtio_net_config).
2013      * Can be overridden with virtio_net_set_config_size.
2014 */
2015 n->config_size = sizeof(struct virtio_net_config);
2016 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2017 "bootindex", "/ethernet-phy@0",
2018 DEVICE(n), NULL);
2019 }
2020
2021 static void virtio_net_pre_save(void *opaque)
2022 {
2023 VirtIONet *n = opaque;
2024
2025 /* At this point, backend must be stopped, otherwise
2026 * it might keep writing to memory. */
2027 assert(!n->vhost_started);
2028 }
2029
2030 static const VMStateDescription vmstate_virtio_net = {
2031 .name = "virtio-net",
2032 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2033 .version_id = VIRTIO_NET_VM_VERSION,
2034 .fields = (VMStateField[]) {
2035 VMSTATE_VIRTIO_DEVICE,
2036 VMSTATE_END_OF_LIST()
2037 },
2038 .pre_save = virtio_net_pre_save,
2039 };
2040
2041 static Property virtio_net_properties[] = {
2042 DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
2043 DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
2044 VIRTIO_NET_F_GUEST_CSUM, true),
2045 DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2046 DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
2047 VIRTIO_NET_F_GUEST_TSO4, true),
2048 DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
2049 VIRTIO_NET_F_GUEST_TSO6, true),
2050 DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
2051 VIRTIO_NET_F_GUEST_ECN, true),
2052 DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
2053 VIRTIO_NET_F_GUEST_UFO, true),
2054 DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
2055 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
2056 DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
2057 VIRTIO_NET_F_HOST_TSO4, true),
2058 DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
2059 VIRTIO_NET_F_HOST_TSO6, true),
2060 DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
2061 VIRTIO_NET_F_HOST_ECN, true),
2062 DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
2063 VIRTIO_NET_F_HOST_UFO, true),
2064 DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
2065 VIRTIO_NET_F_MRG_RXBUF, true),
2066 DEFINE_PROP_BIT("status", VirtIONet, host_features,
2067 VIRTIO_NET_F_STATUS, true),
2068 DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
2069 VIRTIO_NET_F_CTRL_VQ, true),
2070 DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
2071 VIRTIO_NET_F_CTRL_RX, true),
2072 DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
2073 VIRTIO_NET_F_CTRL_VLAN, true),
2074 DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
2075 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
2076 DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
2077 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
2078 DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
2079 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
2080 DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2081 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2082 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
2083 TX_TIMER_INTERVAL),
2084 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2085 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
2086 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2087 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
2088 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
2089 DEFINE_PROP_END_OF_LIST(),
2090 };
2091
2092 static void virtio_net_class_init(ObjectClass *klass, void *data)
2093 {
2094 DeviceClass *dc = DEVICE_CLASS(klass);
2095 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2096
2097 dc->props = virtio_net_properties;
2098 dc->vmsd = &vmstate_virtio_net;
2099 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2100 vdc->realize = virtio_net_device_realize;
2101 vdc->unrealize = virtio_net_device_unrealize;
2102 vdc->get_config = virtio_net_get_config;
2103 vdc->set_config = virtio_net_set_config;
2104 vdc->get_features = virtio_net_get_features;
2105 vdc->set_features = virtio_net_set_features;
2106 vdc->bad_features = virtio_net_bad_features;
2107 vdc->reset = virtio_net_reset;
2108 vdc->set_status = virtio_net_set_status;
2109 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2110 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2111 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
2112 vdc->vmsd = &vmstate_virtio_net_device;
2113 }
2114
2115 static const TypeInfo virtio_net_info = {
2116 .name = TYPE_VIRTIO_NET,
2117 .parent = TYPE_VIRTIO_DEVICE,
2118 .instance_size = sizeof(VirtIONet),
2119 .instance_init = virtio_net_instance_init,
2120 .class_init = virtio_net_class_init,
2121 };
2122
2123 static void virtio_register_types(void)
2124 {
2125 type_register_static(&virtio_net_info);
2126 }
2127
2128 type_init(virtio_register_types)