/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
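
/* For example, endof(struct virtio_net_config, mac) is the offset of the
 * 'mac' field plus its size: the number of config bytes visible to a
 * driver that negotiated only VIRTIO_NET_F_MAC (see feature_sizes below). */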

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

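/* Virtqueues come in rx/tx pairs (rx first, then tx), so a queue pair's
 * index is its virtqueue index divided by two. */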
static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

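/* A device is "started" once the driver is ready, the link is up and the
 * VM itself is running. */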
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

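/* Keep the vhost-net backend in sync with the device state: start it when
 * the device becomes active on a tap peer that supports vhost, stop it
 * otherwise.  On start failure we fall back to userspace virtio. */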
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started == virtio_net_started(n, status) &&
        !nc->peer->link_down) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                qemu_mod_timer(q->tx_timer,
                               qemu_get_clock_ns(vm_clock) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                qemu_del_timer(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down) {
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    } else {
        n->status |= VIRTIO_NET_S_LINK_UP;
    }

    if (n->status != old_status) {
        virtio_notify_config(vdev);
    }

    virtio_net_set_status(vdev, vdev->status);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

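/* Switching mergeable rx buffers changes the header layout the guest
 * sees; recompute guest_hdr_len and push it to any tap peer that can
 * adjust its vnet header length to match. */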
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            assert(!peer_attach(n, i));
        } else {
            assert(!peer_detach(n, i));
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)),
                              !!(features & (1 << VIRTIO_NET_F_CTRL_VQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    return VIRTIO_NET_OK;
}

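/* VIRTIO_NET_CTRL_MAC: either set the primary MAC directly, or replace
 * the whole filter table (unicast entries first, then multicast).  An
 * oversized table is not an error; the matching overflow flag is set
 * instead. */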
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    n->mac_table.first_multi = n->mac_table.in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        /* Multicast entries live after the unicast ones: receive_filter()
         * scans them at indices [first_multi, in_use), so they must be
         * written at that offset, not at the start of the table. */
        s = iov_to_buf(iov, iov_cnt, 0,
                       &n->mac_table.macs[n->mac_table.in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.multi_overflow = 1;
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    } else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    } else {
        return VIRTIO_NET_ERR;
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

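/* Check whether the rx virtqueue can take a packet of 'bufsize' bytes;
 * returns with guest notification disabled iff buffers are available. */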
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

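/* Apply the rx filters (promiscuous mode, VLAN table, MAC table and the
 * all-/no-{unicast,multicast,broadcast} flags); returns non-zero when the
 * packet should be delivered to the guest. */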
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc) {
        return 1;
    }

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) {
            return 0;
        }
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size)) {
        return size;
    }

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0) {
                return -1;
            }
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
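/* Transmit pending packets, at most tx_burst per call.  Returns the
 * number of packets sent, or -EBUSY when an asynchronous send is still
 * in flight and virtio_net_tx_complete() will resume the flush. */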
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged.  Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

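/* Timer-based tx: the first guest kick only arms the timer, batching
 * subsequent writes; a second kick before it fires flushes the queue
 * immediately. */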
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        qemu_mod_timer(q->tx_timer,
                       qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

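/* Bottom-half-based tx: defer the flush to a bottom half so the vCPU can
 * return to the guest quickly. */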
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

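/* Tear down and re-create the virtqueues for the requested number of
 * queue pairs.  Queue pair 0 is kept; the control queue, when present,
 * is always added last. */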
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
                                                   virtio_net_tx_timer,
                                                   &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    if (ctrl) {
        n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    }

    virtio_net_set_queues(n);
}

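/* The save/load order below is part of the migration stream format;
 * fields may only be appended, guarded by VIRTIO_NET_VM_VERSION bumps. */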
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) {
        return -EINVAL;
    }

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3) {
        n->status = qemu_get_be16(f);
    }

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            /* Discard the oversized table; the scratch buffer must hold
             * in_use * ETH_ALEN bytes, not just in_use bytes. */
            uint8_t *buf = g_malloc0(n->mac_table.in_use * ETH_ALEN);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6) {
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    }

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }

        if (n->has_vnet_hdr) {
            tap_set_offload(qemu_get_queue(n->nic)->peer,
                    (vdev->guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                    (vdev->guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                    (vdev->guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                    (vdev->guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                    (vdev->guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

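/* The device config space only extends to the last field whose feature
 * bit is set in host_features, per the feature_sizes table above. */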
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

static VirtIODevice *virtio_net_common_init(DeviceState *dev, NICConf *conf,
                                            virtio_net_conf *net,
                                            uint32_t host_features,
                                            VirtIONet **pn)
{
    VirtIONet *n = *pn;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    int i, config_size = 0;

    /*
     * We have two cases here: the old virtio-net-pci device, and the
     * refactored virtio-net.
     */
    if (n == NULL) {
        /* virtio-net-pci */
        for (i = 0; feature_sizes[i].flags != 0; i++) {
            if (host_features & feature_sizes[i].flags) {
                config_size = MAX(feature_sizes[i].end, config_size);
            }
        }
        n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                            config_size, sizeof(VirtIONet));
        n->config_size = config_size;
    } else {
        /* virtio-net */
        virtio_init(VIRTIO_DEVICE(n), "virtio-net", VIRTIO_ID_NET,
                    n->config_size);
    }

    vdev->get_config = virtio_net_get_config;
    vdev->set_config = virtio_net_set_config;
    vdev->get_features = virtio_net_get_features;
    vdev->set_features = virtio_net_set_features;
    vdev->bad_features = virtio_net_bad_features;
    vdev->reset = virtio_net_reset;
    vdev->set_status = virtio_net_set_status;
    vdev->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdev->guest_notifier_pending = virtio_net_guest_notifier_pending;
    n->max_queues = MAX(conf->queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = net->txtimer;

    if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     net->tx);
        error_report("Defaulting to \"bh\"");
    }

    if (net->tx && !strcmp(net->tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
                                               &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf,
                          object_get_typename(OBJECT(dev)), dev->id, n);
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), conf->macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = net->txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(conf->bootindex, dev, "/ethernet-phy@0");

    return vdev;
}

VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              virtio_net_conf *net, uint32_t host_features)
{
    VirtIONet *n = NULL;
    return virtio_net_common_init(dev, conf, net, host_features, &n);
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(n->qdev, "virtio-net", n);

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            qemu_del_timer(q->tx_timer);
            qemu_free_timer(q->tx_timer);
        } else {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static int virtio_net_device_init(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    VirtIONet *n = VIRTIO_NET(vdev);

    /*
     * Initially, the new VirtIONet device will have a config size =
     * sizeof(struct config), because we can't get host_features here.
     */
    if (virtio_net_common_init(qdev, &(n->nic_conf),
                               &(n->net_conf), 0, &n) == NULL) {
        return -1;
    }
    return 0;
}

static int virtio_net_device_exit(DeviceState *qdev)
{
    VirtIONet *n = VIRTIO_NET(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(qdev, "virtio-net", n);

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            qemu_del_timer(q->tx_timer);
            qemu_free_timer(q->tx_timer);
        } else {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_common_cleanup(vdev);

    return 0;
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    dc->exit = virtio_net_device_exit;
    dc->props = virtio_net_properties;
    vdc->init = virtio_net_device_init;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)