]> git.proxmox.com Git - mirror_qemu.git/blame - hw/net/virtio-net.c
Merge tag 'pull-misc-2022-12-14' of https://repo.or.cz/qemu/armbru into staging
[mirror_qemu.git] / hw / net / virtio-net.c
CommitLineData
fbe78f4f
AL
1/*
2 * Virtio Network Device
3 *
4 * Copyright IBM, Corp. 2007
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
9b8bfe21 14#include "qemu/osdep.h"
9711cd0d 15#include "qemu/atomic.h"
1de7afc9 16#include "qemu/iov.h"
68b0a639 17#include "qemu/log.h"
db725815 18#include "qemu/main-loop.h"
0b8fa32f 19#include "qemu/module.h"
0d09e41a 20#include "hw/virtio/virtio.h"
1422e32d 21#include "net/net.h"
7200ac3c 22#include "net/checksum.h"
a8ed73f7 23#include "net/tap.h"
1de7afc9
PB
24#include "qemu/error-report.h"
25#include "qemu/timer.h"
9711cd0d
JF
26#include "qemu/option.h"
27#include "qemu/option_int.h"
28#include "qemu/config-file.h"
29#include "qapi/qmp/qdict.h"
0d09e41a
PB
30#include "hw/virtio/virtio-net.h"
31#include "net/vhost_net.h"
9d8c6a25 32#include "net/announce.h"
17ec5a86 33#include "hw/virtio/virtio-bus.h"
e688df6b 34#include "qapi/error.h"
9af23989 35#include "qapi/qapi-events-net.h"
a27bd6c7 36#include "hw/qdev-properties.h"
9711cd0d
JF
37#include "qapi/qapi-types-migration.h"
38#include "qapi/qapi-events-migration.h"
1399c60d 39#include "hw/virtio/virtio-access.h"
f8d806c9 40#include "migration/misc.h"
9473939e 41#include "standard-headers/linux/ethtool.h"
2f780b6a 42#include "sysemu/sysemu.h"
9d8c6a25 43#include "trace.h"
9711cd0d
JF
44#include "monitor/qdev.h"
45#include "hw/pci/pci.h"
4474e37a 46#include "net_rx_pkt.h"
108a6481 47#include "hw/virtio/vhost.h"
1b529d90 48#include "sysemu/qtest.h"
fbe78f4f 49
0ce0e8f4 50#define VIRTIO_NET_VM_VERSION 11
b6503ed9 51
f21c0ed9 52#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
9d6271b8 53
1c0fbfa3
MT
54/* previously fixed value */
55#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
9b02e161
WW
56#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
57
441537f1 58/* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */
1c0fbfa3 59#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
9b02e161 60#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
1c0fbfa3 61
2974e916
YB
62#define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */
63
64#define VIRTIO_NET_TCP_FLAG 0x3F
65#define VIRTIO_NET_TCP_HDR_LENGTH 0xF000
66
67/* IPv4 max payload, 16 bits in the header */
68#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
69#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
70
71/* header length value in ip header without option */
72#define VIRTIO_NET_IP4_HEADER_LENGTH 5
73
74#define VIRTIO_NET_IP6_ADDR_SIZE 32 /* ipv6 saddr + daddr */
75#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
76
77/* Purge coalesced packets timer interval, This value affects the performance
78 a lot, and should be tuned carefully, '300000'(300us) is the recommended
79 value to pass the WHQL test, '50000' can gain 2x netperf throughput with
80 tso/gso/gro 'off'. */
81#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
82
59079029
YB
83#define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
84 VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
85 VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
86 VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
87 VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
88 VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
89 VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
90 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
91 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
92
/*
 * Per-feature sizing of the virtio-net config space: when a feature bit
 * is negotiated, the guest-visible config extends up to the given 'end'
 * offset within struct virtio_net_config.
 */
static const VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1ULL << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
     .end = endof(struct virtio_net_config, duplex)},
    {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
     .end = endof(struct virtio_net_config, supported_hash_types)},
    {} /* terminator */
};
108
d74c30c8
DT
/* Bounds used by the virtio core to validate config-space access sizes. */
static const VirtIOConfigSizeParams cfg_size_params = {
    .min_size = endof(struct virtio_net_config, mac),
    .max_size = sizeof(struct virtio_net_config),
    .feature_sizes = feature_sizes
};
114
/* Return the VirtIONetQueue backing @nc (one queue pair per net client). */
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
fed699f9
JW
121
/*
 * Map a virtqueue index to its queue-pair index; the RX and TX
 * virtqueues of one pair occupy two consecutive vq slots.
 */
static int vq2q(int queue_index)
{
    int pair_index = queue_index / 2;

    return pair_index;
}
126
4fdf69ab
KX
/*
 * Flush (or, if delivery is impossible, purge) packets queued towards our
 * peer, then assert that no async TX element remains outstanding.
 */
static void flush_or_purge_queued_packets(NetClientState *nc)
{
    if (!nc->peer) {
        return;
    }

    qemu_flush_or_purge_queued_packets(nc->peer, true);
    assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
}
136
fbe78f4f
AL
137/* TODO
138 * - we could suppress RX interrupt if we were so inclined.
139 */
140
/*
 * Config-space read hook: build a virtio_net_config snapshot from device
 * state and copy n->config_size bytes of it into @config.  For a
 * vhost-vdpa peer the backend's config (notably the MAC) overrides ours.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;
    NetClientState *nc = qemu_get_queue(n->nic);
    static const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };

    int ret = 0;
    memset(&netcfg, 0 , sizeof(struct virtio_net_config));
    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
    netcfg.duplex = n->net_conf.duplex;
    netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
    virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
                 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
                 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
    virtio_stl_p(vdev, &netcfg.supported_hash_types,
                 VIRTIO_NET_RSS_SUPPORTED_HASHES);
    memcpy(config, &netcfg, n->config_size);

    /*
     * Is this VDPA? No peer means not VDPA: there's no way to
     * disconnect/reconnect a VDPA peer.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg,
                                   n->config_size);
        if (ret != -1) {
            /*
             * Some NIC/kernel combinations present 0 as the mac address.  As
             * that is not a legal address, try to proceed with the
             * address from the QEMU command line in the hope that the
             * address has been configured correctly elsewhere - just not
             * reported by the device.
             */
            if (memcmp(&netcfg.mac, &zero, sizeof(zero)) == 0) {
                info_report("Zero hardware mac address detected. Ignoring.");
                memcpy(netcfg.mac, n->mac, ETH_ALEN);
            }
            memcpy(config, &netcfg, n->config_size);
        }
    }
}
187
0f03eca6
AL
/*
 * Config-space write hook.  Only the MAC field is writable, and only when
 * neither CTRL_MAC_ADDR nor VIRTIO_F_VERSION_1 was negotiated (modern
 * guests must use the control vq).  For vhost-vdpa peers the new config
 * is also pushed down to the backend.
 */
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};
    NetClientState *nc = qemu_get_queue(n->nic);

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }

    /*
     * Is this VDPA? No peer means not VDPA: there's no way to
     * disconnect/reconnect a VDPA peer.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        vhost_net_set_config(get_vhost_net(nc->peer),
                             (uint8_t *)&netcfg, 0, n->config_size,
                             VHOST_SET_CONFIG_TYPE_MASTER);
    }
}
213
783e7706
MT
/*
 * True when the device should be actively running for @status: the
 * driver is OK, our link is up, and the VM itself is running.
 */
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
220
b2c929f0
DDAG
/* Raise VIRTIO_NET_S_ANNOUNCE and interrupt the guest via a config change
 * so it (re)broadcasts its presence (gratuitous ARP etc.). */
static void virtio_net_announce_notify(VirtIONet *net)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(net);
    trace_virtio_net_announce_notify();

    net->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
229
f57fcf70
JW
/*
 * Timer callback for the post-migration self-announce series: consume one
 * round and prod the guest to send another announcement.
 */
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    trace_virtio_net_announce_timer(n->announce_timer.round);

    n->announce_timer.round--;
    virtio_net_announce_notify(n);
}
238
/*
 * NetClientInfo announce hook: ask the guest to announce itself, unless
 * the migration announce-timer series is already doing so.  Requires the
 * guest to have negotiated GUEST_ANNOUNCE and CTRL_VQ.
 */
static void virtio_net_announce(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /*
     * Make sure the virtio migration announcement timer isn't running
     * If it is, let it trigger announcement so that we do not cause
     * confusion.
     */
    if (n->announce_timer.round) {
        return;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        virtio_net_announce_notify(n);
    }
}
258
/*
 * Reconcile the vhost backend with the desired device state for @status:
 * start vhost when the device should run and vhost isn't up, stop it in
 * the opposite case.  Before starting, outstanding packets are purged and
 * the MTU is pushed to the backend; failures fall back to userspace
 * virtio (vhost_started stays 0).
 */
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    /* cvq exists only when CTRL_VQ was negotiated; it is the vq beyond
     * the data queue pairs */
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
              n->max_ncs - n->max_queue_pairs : 0;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    /* Nothing to do when desired and current vhost state already agree. */
    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queue_pairs; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        /* Set before start so callbacks observe the device as running. */
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq);
        n->vhost_started = 0;
    }
}
318
1bfa316c
GK
/* Tell one peer backend which vnet-header endianness to use (or reset it
 * when @enable is false).  Returns the backend's result code. */
static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}
329
/*
 * Apply the vnet-header endianness to all @queue_pairs peers; on a
 * failure while enabling, roll back the peers already configured.
 * Returns true when the backends could not handle it, i.e. the caller
 * must byte-swap headers in software.
 */
static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queue_pairs, bool enable)
{
    int i;

    for (i = 0; i < queue_pairs; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}
348
/*
 * Track device start/stop transitions for vnet-header endianness: set the
 * backend endianness when the device starts, restore the default when it
 * stops.  Sets needs_vnet_hdr_swap when the backend can't help.
 */
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fallback onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queue_pairs, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false);
    }
}
371
283e2c2a
YB
/* Drop everything pending on @vq; notify the guest if anything was dropped. */
static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}
379
783e7706
MT
/*
 * VirtIODevice set_status hook: propagate @status to endianness handling
 * and vhost, then walk every queue pair to flush pending RX, (re)arm or
 * cancel the TX timer/bottom-half, and drop stale TX data on queues that
 * are no longer active.
 */
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queue_pairs; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        /* Queues beyond the current count (or beyond 0 in single-queue
         * mode) are treated as stopped. */
        if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                vdev->vm_running) {
                /* if tx is waiting we are likely have some packets in tx queue
                 * and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}
436
4e68f7a0 437static void virtio_net_set_link_status(NetClientState *nc)
554c97dd 438{
cc1f0f45 439 VirtIONet *n = qemu_get_nic_opaque(nc);
17a0ca55 440 VirtIODevice *vdev = VIRTIO_DEVICE(n);
554c97dd
AL
441 uint16_t old_status = n->status;
442
eb6b6c12 443 if (nc->link_down)
554c97dd
AL
444 n->status &= ~VIRTIO_NET_S_LINK_UP;
445 else
446 n->status |= VIRTIO_NET_S_LINK_UP;
447
448 if (n->status != old_status)
17a0ca55 449 virtio_notify_config(vdev);
afbaa7b4 450
17a0ca55 451 virtio_net_set_status(vdev, vdev->status);
554c97dd
AL
452}
453
b1be4280
AK
/*
 * Emit a NIC_RX_FILTER_CHANGED QMP event for this device, then suppress
 * further events until management queries the filter (flood protection).
 */
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        char *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(n->netclient_name, path);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
467
f7bc8ef8
AK
/*
 * Build a QAPI intList of every VLAN id currently set in the filter
 * bitmap (32 ids per word).  Caller owns the returned list.
 */
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                QAPI_LIST_PREPEND(list, (i << 5) + j);
            }
        }
    }

    return list;
}
484
b1be4280
AK
/*
 * query-rx-filter QMP handler: snapshot the device's RX filter state
 * (unicast/multicast/broadcast modes, MAC tables, VLAN table).  Event
 * notification is re-enabled once the query has been served.
 */
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    /* NOTE(review): 'nobcast' elsewhere means broadcast is *disabled*;
     * reporting it unmodified as 'broadcast_allowed' looks inverted —
     * confirm against the QMP schema before changing. */
    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    /* Entries before first_multi are unicast, the rest multicast. */
    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        QAPI_LIST_PREPEND(str_list,
                          qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        QAPI_LIST_PREPEND(str_list,
                          qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}
547
7dc6be52
XZ
/*
 * Per-virtqueue reset hook (VIRTIO_F_RING_RESET): stop the backend vq for
 * tap-backed vhost peers and drop packets queued towards this queue.
 * The control vq (index beyond the data pairs) is skipped.
 */
static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;

    /* validate queue_index and skip for cvq */
    if (queue_index >= n->max_queue_pairs * 2) {
        return;
    }

    nc = qemu_get_subqueue(n->nic, vq2q(queue_index));

    if (!nc->peer) {
        return;
    }

    if (get_vhost_net(nc->peer) &&
        nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
        vhost_net_virtqueue_reset(vdev, nc, queue_index);
    }

    flush_or_purge_queued_packets(nc);
}
571
7f863302
KX
/*
 * Per-virtqueue re-enable hook (after a ring reset): restart the backend
 * vq for tap-backed vhost peers.  Only meaningful while vhost is running;
 * the control vq is skipped.
 */
static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc;
    int r;

    /* validate queue_index and skip for cvq */
    if (queue_index >= n->max_queue_pairs * 2) {
        return;
    }

    nc = qemu_get_subqueue(n->nic, vq2q(queue_index));

    if (!nc->peer || !vdev->vhost_started) {
        return;
    }

    if (get_vhost_net(nc->peer) &&
        nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
        r = vhost_net_virtqueue_restart(vdev, nc, queue_index);
        if (r < 0) {
            error_report("unable to restart vhost net virtqueue: %d, "
                         "when resetting the queue", queue_index);
        }
    }
}
598
002437cd
AL
/*
 * Device reset: return RX-filter modes, MAC/VLAN tables, queue count and
 * announce state to their power-on defaults, and flush async TX on every
 * queue pair.
 */
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queue_pairs = 1;
    timer_del(n->announce_timer.tm);
    n->announce_timer.round = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);

    /* Flush any async TX */
    for (i = 0; i < n->max_queue_pairs; i++) {
        flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
    }
}
632
/* Probe whether the peer backend supports vnet headers; result cached in
 * n->has_vnet_hdr (left unchanged when there is no peer). */
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}
3a330134 642
6e371ab8
MT
/* Cached answer from peer_test_vnet_hdr(). */
static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
647
0ce0e8f4
MM
648static int peer_has_ufo(VirtIONet *n)
649{
650 if (!peer_has_vnet_hdr(n))
651 return 0;
652
d6085e3a 653 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
0ce0e8f4
MM
654
655 return n->has_ufo;
656}
657
/*
 * Configure the guest-visible header layout from negotiated features:
 * virtio-1 guests get the (possibly hash-extended) v1 header, legacy
 * guests the mergeable or classic header.  Peers that support the chosen
 * length also have their vnet header length set so host and guest agree.
 */
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1, int hash_report)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = hash_report ?
            sizeof(struct virtio_net_hdr_v1_hash) :
            sizeof(struct virtio_net_hdr_mrg_rxbuf);
        n->rss_data.populate_hash = !!hash_report;
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
687
2eef278b
MT
/*
 * Maximum permitted TX queue size for this device: only vhost-user and
 * vhost-vdpa peers support sizes above the default.
 */
static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user or vhost-vdpa don't support max queue
     * size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    switch(peer->info->type) {
    case NET_CLIENT_DRIVER_VHOST_USER:
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        return VIRTQUEUE_MAX_SIZE;
    default:
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    };
}
708
fed699f9
JW
/*
 * Enable backend queue @index: vhost-user peers get their vring enabled;
 * tap peers are enabled only in multiqueue mode.  Returns 0 on success
 * (and for peers needing no action).
 */
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    /* Single-queue tap devices need no explicit enable. */
    if (n->max_queue_pairs == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}
731
/*
 * Disable backend queue @index: mirror image of peer_attach() — vhost-user
 * vrings are disabled, tap queues detached.  Returns 0 on success.
 */
static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}
750
/*
 * Bring the backend in sync with curr_queue_pairs: attach the first
 * curr_queue_pairs queues, detach the rest.  No-op once the peer has
 * been deleted (e.g. during hot-unplug teardown).
 */
static void virtio_net_set_queue_pairs(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        if (i < n->curr_queue_pairs) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}
770
ec57db16 771static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
fed699f9 772
9d5b731d
JW
/*
 * VirtIODevice get_features hook: start from the device's host features,
 * strip whatever the peer cannot do (vnet headers, UFO, RSS without the
 * eBPF helper), then let the vhost backend filter the remainder.  The
 * MTU feature may be forced back on when mtu_bypass_backend is set.
 */
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    /* Without vhost, userspace virtio handles ring reset itself. */
    if (!get_vhost_net(nc->peer)) {
        virtio_add_feature(&features, VIRTIO_F_RING_RESET);
        return features;
    }

    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
    }
    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
        (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}
821
/*
 * Features assumed for legacy guests that never negotiated features
 * (pre-feature-negotiation drivers).
 */
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}
836
644c9858
DF
/* Push the currently enabled guest RX offloads down to the peer backend. */
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
846
847static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
848{
849 static const uint64_t guest_offloads_mask =
850 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
851 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
852 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
853 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
854 (1ULL << VIRTIO_NET_F_GUEST_UFO);
855
856 return guest_offloads_mask & features;
857}
858
/* Guest offload bits present in the negotiated guest feature word. */
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
864
f5e1847b
JQ
/* Search state for the qbus walk looking for our failover primary. */
typedef struct {
    VirtIONet *n;
    DeviceState *dev;   /* filled in when a match is found */
} FailoverDevice;

/**
 * Set the failover primary device
 *
 * @dev: candidate device visited by the bus walk
 * @opaque: FailoverDevice describing what we are looking for
 *
 * Returns 1 (stopping the walk) when @dev is a PCI device whose
 * failover_pair_id matches our netclient name, 0 otherwise.
 */
static int failover_set_primary(DeviceState *dev, void *opaque)
{
    FailoverDevice *fdev = opaque;
    PCIDevice *pci_dev = (PCIDevice *)
        object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE);

    if (!pci_dev) {
        return 0;
    }

    if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) {
        fdev->dev = dev;
        return 1;
    }

    return 0;
}
894
21e8709b
JQ
/**
 * Find the primary device for this failover virtio-net
 *
 * @n: VirtIONet device
 *
 * Returns the primary device, or NULL when none is currently present.
 */
static DeviceState *failover_find_primary_device(VirtIONet *n)
{
    FailoverDevice fdev = {
        .n = n,
    };

    qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL,
                       NULL, NULL, &fdev);
    return fdev.dev;
}
911
9711cd0d
JF
/*
 * Hotplug the failover primary device from the stashed device options.
 * No-op if the primary already exists; reports an error via @errp when no
 * options were recorded or device creation fails.
 */
static void failover_add_primary(VirtIONet *n, Error **errp)
{
    Error *err = NULL;
    DeviceState *dev = failover_find_primary_device(n);

    if (dev) {
        return;
    }

    if (!n->primary_opts) {
        error_setg(errp, "Primary device not found");
        error_append_hint(errp, "Virtio-net failover will not work. Make "
                          "sure primary device has parameter"
                          " failover_pair_id=%s\n", n->netclient_name);
        return;
    }

    dev = qdev_device_add_from_qdict(n->primary_opts,
                                     n->primary_opts_from_json,
                                     &err);
    if (err) {
        /* Creation failed: the stashed options are no longer usable. */
        qobject_unref(n->primary_opts);
        n->primary_opts = NULL;
    } else {
        /* Drop our reference; the bus keeps the device alive. */
        object_unref(OBJECT(dev));
    }
    error_propagate(errp, err);
}
940
/*
 * VirtIODevice set_features hook: configure multiqueue, header layout,
 * RSC/RSS state and guest offloads from the negotiated feature set, ack
 * the features to every vhost backend, and handle the VLAN-filter and
 * failover (STANDBY) side effects.
 */
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    Error *err = NULL;
    int i;

    if (n->mtu_bypass_backend &&
        !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    /* RSS implies multiqueue even without VIRTIO_NET_F_MQ. */
    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1),
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_HASH_REPORT));

    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
    n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queue_pairs; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    /* Without CTRL_VLAN the filter must pass every VLAN. */
    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
        qapi_event_send_failover_negotiated(n->netclient_name);
        qatomic_set(&n->failover_primary_hidden, false);
        failover_add_primary(n, &err);
        if (err) {
            if (!qtest_enabled()) {
                warn_report_err(err);
            } else {
                error_free(err);
            }
        }
    }
}
1004
002437cd 1005static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
921ac5d0 1006 struct iovec *iov, unsigned int iov_cnt)
002437cd
AL
1007{
1008 uint8_t on;
921ac5d0 1009 size_t s;
b1be4280 1010 NetClientState *nc = qemu_get_queue(n->nic);
002437cd 1011
921ac5d0
MT
1012 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
1013 if (s != sizeof(on)) {
1014 return VIRTIO_NET_ERR;
002437cd
AL
1015 }
1016
dd23454b 1017 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
002437cd 1018 n->promisc = on;
dd23454b 1019 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
002437cd 1020 n->allmulti = on;
dd23454b 1021 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
015cb166 1022 n->alluni = on;
dd23454b 1023 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
015cb166 1024 n->nomulti = on;
dd23454b 1025 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
015cb166 1026 n->nouni = on;
dd23454b 1027 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
015cb166 1028 n->nobcast = on;
921ac5d0 1029 } else {
002437cd 1030 return VIRTIO_NET_ERR;
921ac5d0 1031 }
002437cd 1032
b1be4280
AK
1033 rxfilter_notify(nc);
1034
002437cd
AL
1035 return VIRTIO_NET_OK;
1036}
1037
644c9858
DF
1038static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
1039 struct iovec *iov, unsigned int iov_cnt)
1040{
1041 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1042 uint64_t offloads;
1043 size_t s;
1044
95129d6f 1045 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
644c9858
DF
1046 return VIRTIO_NET_ERR;
1047 }
1048
1049 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
1050 if (s != sizeof(offloads)) {
1051 return VIRTIO_NET_ERR;
1052 }
1053
1054 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
1055 uint64_t supported_offloads;
1056
189ae6bb
JW
1057 offloads = virtio_ldq_p(vdev, &offloads);
1058
644c9858
DF
1059 if (!n->has_vnet_hdr) {
1060 return VIRTIO_NET_ERR;
1061 }
1062
2974e916
YB
1063 n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1064 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
1065 n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1066 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
1067 virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
1068
644c9858
DF
1069 supported_offloads = virtio_net_supported_guest_offloads(n);
1070 if (offloads & ~supported_offloads) {
1071 return VIRTIO_NET_ERR;
1072 }
1073
1074 n->curr_guest_offloads = offloads;
1075 virtio_net_apply_guest_offloads(n);
1076
1077 return VIRTIO_NET_OK;
1078 } else {
1079 return VIRTIO_NET_ERR;
1080 }
1081}
1082
/*
 * VIRTIO_NET_CTRL_MAC: either set the primary MAC address
 * (MAC_ADDR_SET) or replace the whole unicast+multicast filter table
 * (MAC_TABLE_SET).  The table payload is two consecutive
 * virtio_net_ctrl_mac structures: unicast entries first, then
 * multicast entries; overflowing either list sets the corresponding
 * *_overflow flag instead of storing the entries.
 */
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    /* Build the new table in a scratch buffer; commit only on success. */
    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    /* --- unicast section --- */
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    /* Reject a count that claims more data than the buffer holds. */
    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        /* Too many unicast entries: fall back to "match any unicast". */
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    /* Multicast entries start where the unicast ones ended. */
    first_multi = in_use;

    /* --- multicast section --- */
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    /* The multicast list must consume the remainder of the buffer exactly. */
    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        /* Too many multicast entries: "match any multicast". */
        multi_overflow = 1;
    }

    /* Commit the fully-validated table atomically w.r.t. the filter. */
    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
1178
f21c0ed9 1179static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
921ac5d0 1180 struct iovec *iov, unsigned int iov_cnt)
f21c0ed9 1181{
1399c60d 1182 VirtIODevice *vdev = VIRTIO_DEVICE(n);
f21c0ed9 1183 uint16_t vid;
921ac5d0 1184 size_t s;
b1be4280 1185 NetClientState *nc = qemu_get_queue(n->nic);
f21c0ed9 1186
921ac5d0 1187 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
1399c60d 1188 vid = virtio_lduw_p(vdev, &vid);
921ac5d0 1189 if (s != sizeof(vid)) {
f21c0ed9
AL
1190 return VIRTIO_NET_ERR;
1191 }
1192
f21c0ed9
AL
1193 if (vid >= MAX_VLAN)
1194 return VIRTIO_NET_ERR;
1195
1196 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
1197 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
1198 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
1199 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
1200 else
1201 return VIRTIO_NET_ERR;
1202
b1be4280
AK
1203 rxfilter_notify(nc);
1204
f21c0ed9
AL
1205 return VIRTIO_NET_OK;
1206}
1207
f57fcf70
JW
1208static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
1209 struct iovec *iov, unsigned int iov_cnt)
1210{
9d8c6a25 1211 trace_virtio_net_handle_announce(n->announce_timer.round);
f57fcf70
JW
1212 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
1213 n->status & VIRTIO_NET_S_ANNOUNCE) {
1214 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
9d8c6a25
DDAG
1215 if (n->announce_timer.round) {
1216 qemu_announce_timer_step(&n->announce_timer);
f57fcf70
JW
1217 }
1218 return VIRTIO_NET_OK;
1219 } else {
1220 return VIRTIO_NET_ERR;
1221 }
1222}
1223
0145c393
AM
1224static void virtio_net_detach_epbf_rss(VirtIONet *n);
1225
59079029
YB
1226static void virtio_net_disable_rss(VirtIONet *n)
1227{
1228 if (n->rss_data.enabled) {
1229 trace_virtio_net_rss_disable();
1230 }
1231 n->rss_data.enabled = false;
0145c393
AM
1232
1233 virtio_net_detach_epbf_rss(n);
1234}
1235
1236static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
1237{
1238 NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
1239 if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
1240 return false;
1241 }
1242
1243 return nc->info->set_steering_ebpf(nc, prog_fd);
1244}
1245
1246static void rss_data_to_rss_config(struct VirtioNetRssData *data,
1247 struct EBPFRSSConfig *config)
1248{
1249 config->redirect = data->redirect;
1250 config->populate_hash = data->populate_hash;
1251 config->hash_types = data->hash_types;
1252 config->indirections_len = data->indirections_len;
1253 config->default_queue = data->default_queue;
1254}
1255
1256static bool virtio_net_attach_epbf_rss(VirtIONet *n)
1257{
1258 struct EBPFRSSConfig config = {};
1259
1260 if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
1261 return false;
1262 }
1263
1264 rss_data_to_rss_config(&n->rss_data, &config);
1265
1266 if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
1267 n->rss_data.indirections_table, n->rss_data.key)) {
1268 return false;
1269 }
1270
1271 if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
1272 return false;
1273 }
1274
1275 return true;
1276}
1277
/* Detach the steering eBPF program from the backend (prog_fd == -1). */
static void virtio_net_detach_epbf_rss(VirtIONet *n)
{
    virtio_net_attach_ebpf_to_backend(n->nic, -1);
}
1282
/*
 * Load the RSS eBPF program, but only if the backend can accept a
 * steering program at all (probed with a detach, which is a no-op).
 */
static bool virtio_net_load_ebpf(VirtIONet *n)
{
    if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
        /* backend doesn't support steering ebpf */
        return false;
    }

    return ebpf_rss_load(&n->ebpf_rss);
}
1292
/* Detach the steering program from the backend and free the eBPF object. */
static void virtio_net_unload_ebpf(VirtIONet *n)
{
    virtio_net_attach_ebpf_to_backend(n->nic, -1);
    ebpf_rss_unload(&n->ebpf_rss);
}
1298
1299static uint16_t virtio_net_handle_rss(VirtIONet *n,
e22f0603
YB
1300 struct iovec *iov,
1301 unsigned int iov_cnt,
1302 bool do_rss)
59079029
YB
1303{
1304 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1305 struct virtio_net_rss_config cfg;
1306 size_t s, offset = 0, size_get;
441537f1 1307 uint16_t queue_pairs, i;
59079029
YB
1308 struct {
1309 uint16_t us;
1310 uint8_t b;
1311 } QEMU_PACKED temp;
1312 const char *err_msg = "";
1313 uint32_t err_value = 0;
1314
e22f0603 1315 if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
59079029
YB
1316 err_msg = "RSS is not negotiated";
1317 goto error;
1318 }
e22f0603
YB
1319 if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
1320 err_msg = "Hash report is not negotiated";
1321 goto error;
1322 }
59079029
YB
1323 size_get = offsetof(struct virtio_net_rss_config, indirection_table);
1324 s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
1325 if (s != size_get) {
1326 err_msg = "Short command buffer";
1327 err_value = (uint32_t)s;
1328 goto error;
1329 }
1330 n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
1331 n->rss_data.indirections_len =
1332 virtio_lduw_p(vdev, &cfg.indirection_table_mask);
1333 n->rss_data.indirections_len++;
e22f0603
YB
1334 if (!do_rss) {
1335 n->rss_data.indirections_len = 1;
1336 }
59079029
YB
1337 if (!is_power_of_2(n->rss_data.indirections_len)) {
1338 err_msg = "Invalid size of indirection table";
1339 err_value = n->rss_data.indirections_len;
1340 goto error;
1341 }
1342 if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
1343 err_msg = "Too large indirection table";
1344 err_value = n->rss_data.indirections_len;
1345 goto error;
1346 }
e22f0603
YB
1347 n->rss_data.default_queue = do_rss ?
1348 virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
441537f1 1349 if (n->rss_data.default_queue >= n->max_queue_pairs) {
59079029
YB
1350 err_msg = "Invalid default queue";
1351 err_value = n->rss_data.default_queue;
1352 goto error;
1353 }
1354 offset += size_get;
1355 size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
1356 g_free(n->rss_data.indirections_table);
1357 n->rss_data.indirections_table = g_malloc(size_get);
1358 if (!n->rss_data.indirections_table) {
1359 err_msg = "Can't allocate indirections table";
1360 err_value = n->rss_data.indirections_len;
1361 goto error;
1362 }
1363 s = iov_to_buf(iov, iov_cnt, offset,
1364 n->rss_data.indirections_table, size_get);
1365 if (s != size_get) {
1366 err_msg = "Short indirection table buffer";
1367 err_value = (uint32_t)s;
1368 goto error;
1369 }
1370 for (i = 0; i < n->rss_data.indirections_len; ++i) {
1371 uint16_t val = n->rss_data.indirections_table[i];
1372 n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
1373 }
1374 offset += size_get;
1375 size_get = sizeof(temp);
1376 s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
1377 if (s != size_get) {
441537f1 1378 err_msg = "Can't get queue_pairs";
59079029
YB
1379 err_value = (uint32_t)s;
1380 goto error;
1381 }
441537f1
JW
1382 queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs;
1383 if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) {
1384 err_msg = "Invalid number of queue_pairs";
1385 err_value = queue_pairs;
59079029
YB
1386 goto error;
1387 }
1388 if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
1389 err_msg = "Invalid key size";
1390 err_value = temp.b;
1391 goto error;
1392 }
1393 if (!temp.b && n->rss_data.hash_types) {
1394 err_msg = "No key provided";
1395 err_value = 0;
1396 goto error;
1397 }
1398 if (!temp.b && !n->rss_data.hash_types) {
1399 virtio_net_disable_rss(n);
441537f1 1400 return queue_pairs;
59079029
YB
1401 }
1402 offset += size_get;
1403 size_get = temp.b;
1404 s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
1405 if (s != size_get) {
1406 err_msg = "Can get key buffer";
1407 err_value = (uint32_t)s;
1408 goto error;
1409 }
1410 n->rss_data.enabled = true;
0145c393
AM
1411
1412 if (!n->rss_data.populate_hash) {
1413 if (!virtio_net_attach_epbf_rss(n)) {
1414 /* EBPF must be loaded for vhost */
1415 if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
1416 warn_report("Can't load eBPF RSS for vhost");
1417 goto error;
1418 }
1419 /* fallback to software RSS */
1420 warn_report("Can't load eBPF RSS - fallback to software RSS");
1421 n->rss_data.enabled_software_rss = true;
1422 }
1423 } else {
1424 /* use software RSS for hash populating */
1425 /* and detach eBPF if was loaded before */
1426 virtio_net_detach_epbf_rss(n);
1427 n->rss_data.enabled_software_rss = true;
1428 }
1429
59079029
YB
1430 trace_virtio_net_rss_enable(n->rss_data.hash_types,
1431 n->rss_data.indirections_len,
1432 temp.b);
441537f1 1433 return queue_pairs;
59079029
YB
1434error:
1435 trace_virtio_net_rss_error(err_msg, err_value);
1436 virtio_net_disable_rss(n);
1437 return 0;
1438}
1439
/*
 * VIRTIO_NET_CTRL_MQ: handle multiqueue configuration — either plain
 * VQ_PAIRS_SET, or RSS/hash-report configuration (delegated to
 * virtio_net_handle_rss()).  Validates the requested queue-pair count
 * and reconfigures the device queues.
 */
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t queue_pairs;
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Any MQ command invalidates the previous RSS configuration. */
    virtio_net_disable_rss(n);
    if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
        /* Hash-report config does not change the queue count. */
        queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false);
        return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
    }
    if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
        queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true);
    } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        struct virtio_net_ctrl_mq mq;
        size_t s;
        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
        if (s != sizeof(mq)) {
            return VIRTIO_NET_ERR;
        }
        queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    } else {
        return VIRTIO_NET_ERR;
    }

    if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queue_pairs > n->max_queue_pairs ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queue_pairs = queue_pairs;
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        /*
         * Avoid updating the backend for a vdpa device: We're only interested
         * in updating the device model queues.
         */
        return VIRTIO_NET_OK;
    }
    /* stop the backend before changing the number of queue_pairs to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queue_pairs(n);

    return VIRTIO_NET_OK;
}
ba7eadb5 1492
/*
 * Process one control-queue request given as raw in/out scatter lists.
 *
 * Parses the virtio_net_ctrl_hdr from @out_sg, dispatches to the
 * per-class handler, and writes the one-byte status into @in_sg.
 * Returns the number of bytes written to @in_sg (sizeof(status)), or
 * 0 after marking the device broken when the buffers are too small.
 * Exported so that vhost-user/vhost-vdpa shadow CVQ can reuse it.
 */
size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
                                  const struct iovec *in_sg, unsigned in_num,
                                  const struct iovec *out_sg,
                                  unsigned out_num)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    size_t s;
    struct iovec *iov, *iov2;

    if (iov_size(in_sg, in_num) < sizeof(status) ||
        iov_size(out_sg, out_num) < sizeof(ctrl)) {
        virtio_error(vdev, "virtio-net ctrl missing headers");
        return 0;
    }

    /*
     * Work on a copy of the iovec array: iov_discard_front() modifies
     * it, and the caller's array must stay intact.  iov2 keeps the
     * pointer returned by g_memdup2() so it can be freed.
     */
    iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
    s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
    iov_discard_front(&iov, &out_num, sizeof(ctrl));
    if (s != sizeof(ctrl)) {
        status = VIRTIO_NET_ERR;
    } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
        status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
        status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
        status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
        status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
        status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
    } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
        status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
    }

    s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
    assert(s == sizeof(status));

    g_free(iov2);
    return sizeof(status);
}
1535
1536static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1537{
1538 VirtQueueElement *elem;
3d11d36c 1539
51b19ebe 1540 for (;;) {
640b8a1c 1541 size_t written;
51b19ebe
PB
1542 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1543 if (!elem) {
1544 break;
1545 }
640b8a1c
EP
1546
1547 written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
1548 elem->out_sg, elem->out_num);
1549 if (written > 0) {
1550 virtqueue_push(vq, elem, written);
1551 virtio_notify(vdev, vq);
1552 g_free(elem);
1553 } else {
ba7eadb5
GK
1554 virtqueue_detach_element(vq, elem, 0);
1555 g_free(elem);
1556 break;
3d11d36c 1557 }
3d11d36c
AL
1558 }
1559}
1560
fbe78f4f
AL
1561/* RX */
1562
1563static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1564{
17a0ca55 1565 VirtIONet *n = VIRTIO_NET(vdev);
fed699f9 1566 int queue_index = vq2q(virtio_get_queue_index(vq));
8aeff62d 1567
fed699f9 1568 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
fbe78f4f
AL
1569}
1570
b8c4b67e 1571static bool virtio_net_can_receive(NetClientState *nc)
fbe78f4f 1572{
cc1f0f45 1573 VirtIONet *n = qemu_get_nic_opaque(nc);
17a0ca55 1574 VirtIODevice *vdev = VIRTIO_DEVICE(n);
fed699f9 1575 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
0c87e93e 1576
17a0ca55 1577 if (!vdev->vm_running) {
b8c4b67e 1578 return false;
95477323 1579 }
cdd5cc12 1580
441537f1 1581 if (nc->queue_index >= n->curr_queue_pairs) {
b8c4b67e 1582 return false;
fed699f9
JW
1583 }
1584
0c87e93e 1585 if (!virtio_queue_ready(q->rx_vq) ||
17a0ca55 1586 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
b8c4b67e 1587 return false;
0c87e93e 1588 }
fbe78f4f 1589
b8c4b67e 1590 return true;
cdd5cc12
MM
1591}
1592
/*
 * Check whether the RX virtqueue can hold @bufsize bytes.  When it
 * cannot, guest notification is re-enabled so we get kicked once
 * buffers are posted; when it can, notification is suppressed while we
 * keep consuming.  Returns 1 if there is room, 0 otherwise.
 */
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
1615
/*
 * Byte-swap the multi-byte fields of a virtio_net_hdr between host and
 * device endianness (no-op when they already match).
 */
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
1623
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums. This
 * causes dhclient to get upset. Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums. This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    /*
     * Byte offsets below index into the raw Ethernet frame:
     * 12-13 ethertype, 23 IPv4 protocol, 34-35 UDP source port.
     */
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size, CSUM_UDP);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
1650
/*
 * Write the virtio-net header for an incoming packet into the guest's
 * scatter list.  With a vnet header the backend-provided header at the
 * front of @buf is (possibly byte-swapped and) copied through; without
 * one a zeroed GSO_NONE header is synthesized.
 */
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        /* May rewrite the UDP checksum in place — see the dhclient note. */
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
1672
3831ab20
AL
1673static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1674{
1675 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
f21c0ed9 1676 static const uint8_t vlan[] = {0x81, 0x00};
3831ab20 1677 uint8_t *ptr = (uint8_t *)buf;
b6503ed9 1678 int i;
3831ab20
AL
1679
1680 if (n->promisc)
1681 return 1;
1682
e043ebc6 1683 ptr += n->host_hdr_len;
3a330134 1684
f21c0ed9 1685 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
7542d3e7 1686 int vid = lduw_be_p(ptr + 14) & 0xfff;
f21c0ed9
AL
1687 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1688 return 0;
1689 }
1690
bbe2f399
AW
1691 if (ptr[0] & 1) { // multicast
1692 if (!memcmp(ptr, bcast, sizeof(bcast))) {
015cb166
AW
1693 return !n->nobcast;
1694 } else if (n->nomulti) {
1695 return 0;
8fd2a2f1 1696 } else if (n->allmulti || n->mac_table.multi_overflow) {
bbe2f399
AW
1697 return 1;
1698 }
2d9aba39
AW
1699
1700 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1701 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1702 return 1;
1703 }
1704 }
bbe2f399 1705 } else { // unicast
015cb166
AW
1706 if (n->nouni) {
1707 return 0;
1708 } else if (n->alluni || n->mac_table.uni_overflow) {
8fd2a2f1
AW
1709 return 1;
1710 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
bbe2f399
AW
1711 return 1;
1712 }
3831ab20 1713
2d9aba39
AW
1714 for (i = 0; i < n->mac_table.first_multi; i++) {
1715 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1716 return 1;
1717 }
1718 }
b6503ed9
AL
1719 }
1720
3831ab20
AL
1721 return 0;
1722}
1723
4474e37a
YB
1724static uint8_t virtio_net_get_hash_type(bool isip4,
1725 bool isip6,
1726 bool isudp,
1727 bool istcp,
1728 uint32_t types)
1729{
1730 if (isip4) {
1731 if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
1732 return NetPktRssIpV4Tcp;
1733 }
1734 if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
1735 return NetPktRssIpV4Udp;
1736 }
1737 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
1738 return NetPktRssIpV4;
1739 }
1740 } else if (isip6) {
1741 uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
1742 VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
1743
1744 if (istcp && (types & mask)) {
1745 return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
1746 NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
1747 }
1748 mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
1749 if (isudp && (types & mask)) {
1750 return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
1751 NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
1752 }
1753 mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
1754 if (types & mask) {
1755 return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
1756 NetPktRssIpV6Ex : NetPktRssIpV6;
1757 }
1758 }
1759 return 0xff;
1760}
1761
/*
 * Write the computed hash and report type into the
 * virtio_net_hdr_v1_hash at the start of the packet buffer.
 * NOTE(review): values are stored without explicit endian conversion —
 * presumably only used when header swapping is not needed; confirm.
 */
static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report,
                                   uint32_t hash)
{
    struct virtio_net_hdr_v1_hash *hdr = (void *)buf;
    hdr->hash_value = hash;
    hdr->hash_report = report;
}
1769
/*
 * Software RSS: classify the packet, optionally write the hash report
 * into its header, and compute the destination queue through the
 * indirection table.  Returns the new queue index, or -1 when the
 * packet should stay on the current queue (no redirect, unclassified,
 * or already on the right queue).
 */
static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    unsigned int index = nc->queue_index, new_index = index;
    struct NetRxPkt *pkt = n->rx_pkt;
    uint8_t net_hash_type;
    uint32_t hash;
    bool isip4, isip6, isudp, istcp;
    /* Maps NetPktRss* (index) to the VIRTIO_NET_HASH_REPORT_* code. */
    static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
        VIRTIO_NET_HASH_REPORT_IPv4,
        VIRTIO_NET_HASH_REPORT_TCPv4,
        VIRTIO_NET_HASH_REPORT_TCPv6,
        VIRTIO_NET_HASH_REPORT_IPv6,
        VIRTIO_NET_HASH_REPORT_IPv6_EX,
        VIRTIO_NET_HASH_REPORT_TCPv6_EX,
        VIRTIO_NET_HASH_REPORT_UDPv4,
        VIRTIO_NET_HASH_REPORT_UDPv6,
        VIRTIO_NET_HASH_REPORT_UDPv6_EX
    };

    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
                             size - n->host_hdr_len);
    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
    /* L4 hashing is not defined for fragments; fall back to L3. */
    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
        istcp = isudp = false;
    }
    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
        istcp = isudp = false;
    }
    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
                                             n->rss_data.hash_types);
    if (net_hash_type > NetPktRssIpV6UdpEx) {
        /* Unclassified packet: report no hash, use the default queue. */
        if (n->rss_data.populate_hash) {
            virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0);
        }
        return n->rss_data.redirect ? n->rss_data.default_queue : -1;
    }

    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);

    if (n->rss_data.populate_hash) {
        virtio_set_packet_hash(buf, reports[net_hash_type], hash);
    }

    if (n->rss_data.redirect) {
        /* indirections_len is a power of two, so this masks the hash. */
        new_index = hash & (n->rss_data.indirections_len - 1);
        new_index = n->rss_data.indirections_table[new_index];
    }

    return (index == new_index) ? -1 : new_index;
}
1822
/*
 * Deliver one packet from the backend into the guest RX virtqueue.
 *
 * With software RSS enabled (and unless @no_rss, which prevents
 * re-redirect recursion) the packet may first be re-dispatched to
 * another subqueue.  The packet is then split across as many RX
 * descriptor chains as needed (mergeable buffers), with the virtio-net
 * header synthesized in front.  Elements are collected in elems[]/
 * lens[] and only pushed to the guest once the whole packet fits; on
 * any error every popped element is detached again so no buffer is
 * lost.
 *
 * Returns the number of bytes consumed, 0 to ask the peer to queue the
 * packet (no room), or -1 on error.
 */
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size, bool no_rss)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
    size_t lens[VIRTQUEUE_MAX_SIZE];
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset, j;
    ssize_t err;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* Software RSS may redirect the packet to a different subqueue. */
    if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) {
        int index = virtio_net_process_rss(nc, buf, size);
        if (index >= 0) {
            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
            return virtio_net_receive_rcu(nc2, buf, size, true);
        }
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        if (i == VIRTQUEUE_MAX_SIZE) {
            virtio_error(vdev, "virtio-net unexpected long buffer chain");
            err = size;
            goto err;
        }

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            /* Running dry mid-packet is a guest bug; mark device broken. */
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            err = -1;
            goto err;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            err = -1;
            goto err;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            /*
             * Remember where num_buffers lives in the first element so
             * it can be patched once the final buffer count is known.
             */
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            if (n->rss_data.populate_hash) {
                /* Copy the remainder of the (hash-bearing) header too. */
                offset = sizeof(mhdr);
                iov_from_buf(sg, elem->in_num, offset,
                             buf + offset, n->host_hdr_len - sizeof(mhdr));
            }
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet. ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            err = size;
            goto err;
        }

        elems[i] = elem;
        lens[i] = total;
        i++;
    }

    if (mhdr_cnt) {
        /* Patch num_buffers in the first element's header. */
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    for (j = 0; j < i; j++) {
        /* signal other side */
        virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
        g_free(elems[j]);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;

err:
    /* Return every element popped so far to the guest untouched. */
    for (j = 0; j < i; j++) {
        virtqueue_detach_element(q->rx_vq, elems[j], lens[j]);
        g_free(elems[j]);
    }

    return err;
}
1965
/*
 * Plain receive entry point: take the RCU read lock and hand the packet
 * to virtio_net_receive_rcu() with RSS processing enabled (no_rss=false).
 */
static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
                                     size_t size)
{
    RCU_READ_LOCK_GUARD();

    return virtio_net_receive_rcu(nc, buf, size, false);
}
1973
/*
 * Locate the IPv4 and TCP headers inside @buf and fill @unit with
 * pointers into the packet plus derived lengths.  Assumed layout:
 * virtio-net header (guest_hdr_len) + ethernet + IPv4 + TCP.
 * Callers must have validated the buffer size first
 * (see virtio_net_rsc_receive4()).
 */
static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    uint16_t ip_hdrlen;
    struct ip_header *ip;

    ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
                              + sizeof(struct eth_header));
    unit->ip = (void *)ip;
    /* IHL is the low nibble, in 32-bit words; << 2 converts to bytes */
    ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
    unit->ip_plen = &ip->ip_len;
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
    /* TCP data offset is the top nibble in 32-bit words:
       >> 12 to isolate it, << 2 to get bytes => net shift of 10 */
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
    /* IPv4 total length includes the IP header itself, so subtract
       both the IP and TCP header lengths to get the payload size */
    unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
}
1990
/*
 * IPv6 counterpart of virtio_net_rsc_extract_unit4(): locate the IPv6
 * and TCP headers inside @buf and fill @unit.  IPv6 has a fixed-size
 * base header, so the TCP header follows immediately (extension
 * headers are rejected earlier by virtio_net_rsc_sanity_check6()).
 */
static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    struct ip6_header *ip6;

    ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
                                + sizeof(struct eth_header));
    unit->ip = ip6;
    unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
                                      + sizeof(struct ip6_header));
    /* TCP data offset nibble, converted to bytes (see the v4 variant) */
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;

    /* There is a difference between payload length in ipv4 and v6,
       ip header is excluded in ipv6 */
    unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
}
2009
/*
 * Deliver one cached segment to the guest and free it.
 *
 * If the segment is the result of coalescing, fill in the RSC fields
 * of the virtio-net v1 header (segment count, dup-ACK count, flags and
 * gso_type) so the guest can see what was merged.  The segment is
 * removed from the chain and freed regardless of the delivery result.
 *
 * Returns the value of virtio_net_do_receive() (0 means the receive
 * path did not consume the packet).
 */
static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
                                       VirtioNetRscSeg *seg)
{
    int ret;
    struct virtio_net_hdr_v1 *h;

    h = (struct virtio_net_hdr_v1 *)seg->buf;
    h->flags = 0;
    h->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (seg->is_coalesced) {
        h->rsc.segments = seg->packets;
        h->rsc.dup_acks = seg->dup_ack;
        h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
        if (chain->proto == ETH_P_IP) {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
        } else {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
        }
    }

    ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
    QTAILQ_REMOVE(&chain->buffers, seg, next);
    g_free(seg->buf);
    g_free(seg);

    return ret;
}
2038
2039static void virtio_net_rsc_purge(void *opq)
2040{
2041 VirtioNetRscSeg *seg, *rn;
2042 VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
2043
2044 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
2045 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2046 chain->stat.purge_failed++;
2047 continue;
2048 }
2049 }
2050
2051 chain->stat.timer++;
2052 if (!QTAILQ_EMPTY(&chain->buffers)) {
2053 timer_mod(chain->drain_timer,
2054 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
2055 }
2056}
2057
/*
 * Tear down all RSC state for the device: free every cached segment
 * on every chain, free each chain's drain timer, and finally the
 * chains themselves.  Segments are dropped (not delivered).
 */
static void virtio_net_rsc_cleanup(VirtIONet *n)
{
    VirtioNetRscChain *chain, *rn_chain;
    VirtioNetRscSeg *seg, *rn_seg;

    QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
        QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
            QTAILQ_REMOVE(&chain->buffers, seg, next);
            g_free(seg->buf);
            g_free(seg);
        }

        timer_free(chain->drain_timer);
        QTAILQ_REMOVE(&n->rsc_chains, chain, next);
        g_free(chain);
    }
}
2075
/*
 * Cache the packet in @buf as a new segment on @chain so later packets
 * of the same flow can be coalesced into it.
 *
 * The segment buffer is over-allocated to the maximum possible
 * coalesced size (headers + VIRTIO_NET_MAX_TCP_PAYLOAD) so that
 * virtio_net_rsc_coalesce_data() can append payload in place.
 * The header pointers in seg->unit are re-extracted against the copy,
 * not the original @buf.
 */
static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
                                     NetClientState *nc,
                                     const uint8_t *buf, size_t size)
{
    uint16_t hdr_len;
    VirtioNetRscSeg *seg;

    hdr_len = chain->n->guest_hdr_len;
    seg = g_new(VirtioNetRscSeg, 1);
    seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
        + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
    memcpy(seg->buf, buf, size);
    seg->size = size;
    seg->packets = 1;
    seg->dup_ack = 0;
    seg->is_coalesced = 0;
    seg->nc = nc;

    QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
    chain->stat.cache++;

    switch (chain->proto) {
    case ETH_P_IP:
        virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
        break;
    case ETH_P_IPV6:
        virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
        break;
    default:
        /* Chains are only created for ETH_P_IP/ETH_P_IPV6 (see
           virtio_net_rsc_lookup_chain()), anything else is a bug. */
        g_assert_not_reached();
    }
}
2108
/*
 * Handle a packet whose sequence number matches the cached segment but
 * which carries no new payload: classify it as a duplicate ACK, a
 * window update, or a pure ACK.
 *
 * Returns RSC_COALESCE if only the window field was updated in place,
 * otherwise RSC_FINAL to force the cached segment to be flushed.
 */
static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
                                         VirtioNetRscSeg *seg,
                                         const uint8_t *buf,
                                         struct tcp_header *n_tcp,
                                         struct tcp_header *o_tcp)
{
    uint32_t nack, oack;
    uint16_t nwin, owin;

    nack = htonl(n_tcp->th_ack);
    nwin = htons(n_tcp->th_win);
    oack = htonl(o_tcp->th_ack);
    owin = htons(o_tcp->th_win);

    /* Unsigned wrap-around makes this also catch nack < oack */
    if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.ack_out_of_win++;
        return RSC_FINAL;
    } else if (nack == oack) {
        /* duplicated ack or window probe */
        if (nwin == owin) {
            /* duplicated ack, add dup ack count due to whql test up to 1 */
            chain->stat.dup_ack++;
            return RSC_FINAL;
        } else {
            /* Coalesce window update */
            o_tcp->th_win = n_tcp->th_win;
            chain->stat.win_update++;
            return RSC_COALESCE;
        }
    } else {
        /* pure ack, go to 'C', finalize*/
        chain->stat.pure_ack++;
        return RSC_FINAL;
    }
}
2144
/*
 * Try to merge the incoming packet (@n_unit, data in @buf) into the
 * cached segment @seg, which is already known to belong to the same
 * TCP flow.
 *
 * Returns RSC_COALESCE when the payload was appended to the cached
 * segment, RSC_FINAL when the cached segment must be flushed first
 * (out-of-window, out-of-order, oversize, pure/dup ACK).
 */
static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
                                            VirtioNetRscSeg *seg,
                                            const uint8_t *buf,
                                            VirtioNetRscUnit *n_unit)
{
    void *data;
    uint16_t o_ip_len;
    uint32_t nseq, oseq;
    VirtioNetRscUnit *o_unit;

    o_unit = &seg->unit;
    o_ip_len = htons(*o_unit->ip_plen);
    nseq = htonl(n_unit->tcp->th_seq);
    oseq = htonl(o_unit->tcp->th_seq);

    /* out of order or retransmitted. */
    if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.data_out_of_win++;
        return RSC_FINAL;
    }

    data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
    if (nseq == oseq) {
        if ((o_unit->payload == 0) && n_unit->payload) {
            /* From no payload to payload, normal case, not a dup ack or etc */
            chain->stat.data_after_pure_ack++;
            goto coalesce;
        } else {
            return virtio_net_rsc_handle_ack(chain, seg, buf,
                                             n_unit->tcp, o_unit->tcp);
        }
    } else if ((nseq - oseq) != o_unit->payload) {
        /* Not a consistent packet, out of order */
        chain->stat.data_out_of_order++;
        return RSC_FINAL;
    } else {
coalesce:
        if ((o_ip_len + n_unit->payload) > chain->max_payload) {
            chain->stat.over_size++;
            return RSC_FINAL;
        }

        /* Here comes the right data, the payload length in v4/v6 is different,
           so use the field value to update and record the new data len */
        o_unit->payload += n_unit->payload; /* update new data len */

        /* update field in ip header */
        *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);

        /* Bring 'PUSH' big, the whql test guide says 'PUSH' can be coalesced
           for windows guest, while this may change the behavior for linux
           guest (only if it uses RSC feature). */
        o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;

        o_unit->tcp->th_ack = n_unit->tcp->th_ack;
        o_unit->tcp->th_win = n_unit->tcp->th_win;

        /* Append the new payload; seg->buf was sized for the maximum
           coalesced packet in virtio_net_rsc_cache_buf(). */
        memmove(seg->buf + seg->size, data, n_unit->payload);
        seg->size += n_unit->payload;
        seg->packets++;
        chain->stat.coalesced++;
        return RSC_COALESCE;
    }
}
2209
2210static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
2211 VirtioNetRscSeg *seg,
2212 const uint8_t *buf, size_t size,
2213 VirtioNetRscUnit *unit)
2214{
2215 struct ip_header *ip1, *ip2;
2216
2217 ip1 = (struct ip_header *)(unit->ip);
2218 ip2 = (struct ip_header *)(seg->unit.ip);
2219 if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
2220 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2221 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2222 chain->stat.no_match++;
2223 return RSC_NO_MATCH;
2224 }
2225
2226 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2227}
2228
2229static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
2230 VirtioNetRscSeg *seg,
2231 const uint8_t *buf, size_t size,
2232 VirtioNetRscUnit *unit)
2233{
2234 struct ip6_header *ip1, *ip2;
2235
2236 ip1 = (struct ip6_header *)(unit->ip);
2237 ip2 = (struct ip6_header *)(seg->unit.ip);
2238 if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
2239 || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
2240 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2241 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2242 chain->stat.no_match++;
2243 return RSC_NO_MATCH;
2244 }
2245
2246 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2247}
2248
/* Packets with 'SYN' should bypass, other flag should be sent after drain
 * to prevent out of order */
static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
                                         struct tcp_header *tcp)
{
    uint16_t tcp_hdr;
    uint16_t tcp_flag;

    tcp_flag = htons(tcp->th_offset_flags);
    /* Data offset nibble in 32-bit words -> bytes (>> 12 then << 2) */
    tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
    tcp_flag &= VIRTIO_NET_TCP_FLAG;
    if (tcp_flag & TH_SYN) {
        /* Connection setup: deliver immediately, don't coalesce */
        chain->stat.tcp_syn++;
        return RSC_BYPASS;
    }

    if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
        /* Control flags: flush the flow first to keep ordering */
        chain->stat.tcp_ctrl_drain++;
        return RSC_FINAL;
    }

    if (tcp_hdr > sizeof(struct tcp_header)) {
        /* TCP options present: don't coalesce */
        chain->stat.tcp_all_opt++;
        return RSC_FINAL;
    }

    return RSC_CANDIDATE;
}
2277
/*
 * Core RSC step for a candidate packet: try to coalesce it into an
 * existing cached segment on @chain.
 *
 * - Empty cache: cache the packet and arm the drain timer.
 * - RSC_FINAL from a match: flush the cached segment, then deliver
 *   the current packet as-is.
 * - RSC_NO_MATCH: keep scanning; if nothing matches, cache the packet.
 * - Otherwise (coalesced): mark the segment so the checksum is fixed
 *   up at drain time.
 *
 * Returns the number of bytes consumed (0 on delivery failure).
 */
static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
                                         NetClientState *nc,
                                         const uint8_t *buf, size_t size,
                                         VirtioNetRscUnit *unit)
{
    int ret;
    VirtioNetRscSeg *seg, *nseg;

    if (QTAILQ_EMPTY(&chain->buffers)) {
        chain->stat.empty_cache++;
        virtio_net_rsc_cache_buf(chain, nc, buf, size);
        timer_mod(chain->drain_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
        return size;
    }

    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
        if (chain->proto == ETH_P_IP) {
            ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
        } else {
            ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
        }

        if (ret == RSC_FINAL) {
            if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
                /* Send failed */
                chain->stat.final_failed++;
                return 0;
            }

            /* Send current packet */
            return virtio_net_do_receive(nc, buf, size);
        } else if (ret == RSC_NO_MATCH) {
            continue;
        } else {
            /* Coalesced, mark coalesced flag to tell calc cksum for ipv4 */
            seg->is_coalesced = 1;
            return size;
        }
    }

    chain->stat.no_match_cache++;
    virtio_net_rsc_cache_buf(chain, nc, buf, size);
    return size;
}
2323
/* Drain a connection data, this is to avoid out of order segments */
static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
                                        NetClientState *nc,
                                        const uint8_t *buf, size_t size,
                                        uint16_t ip_start, uint16_t ip_size,
                                        uint16_t tcp_port)
{
    VirtioNetRscSeg *seg, *nseg;
    uint32_t ppair1, ppair2;

    /* Source and destination TCP ports are adjacent 16-bit fields, so
       compare both with a single 32-bit load at offset @tcp_port.
       NOTE(review): this load may be unaligned — presumably fine on the
       supported hosts, but verify if porting to strict-alignment targets. */
    ppair1 = *(uint32_t *)(buf + tcp_port);
    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
        ppair2 = *(uint32_t *)(seg->buf + tcp_port);
        if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
            || (ppair1 != ppair2)) {
            continue;
        }
        /* Same flow: flush the cached segment before delivering @buf */
        if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
            chain->stat.drain_failed++;
        }

        break;
    }

    return virtio_net_do_receive(nc, buf, size);
}
2350
/*
 * Decide whether an IPv4 packet is eligible for coalescing.
 * Returns RSC_CANDIDATE for a plain, unfragmented, non-ECN TCP packet
 * with a consistent length; RSC_BYPASS for everything else.
 */
static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
                                            struct ip_header *ip,
                                            const uint8_t *buf, size_t size)
{
    uint16_t ip_len;

    /* Not an ipv4 packet */
    /* NOTE(review): this bumps stat.ip_option rather than a dedicated
       counter — presumably intentional counter reuse; confirm upstream. */
    if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
        chain->stat.ip_option++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ip option */
    if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
        chain->stat.ip_option++;
        return RSC_BYPASS;
    }

    if (ip->ip_p != IPPROTO_TCP) {
        chain->stat.bypass_not_tcp++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ip fragment */
    if (!(htons(ip->ip_off) & IP_DF)) {
        chain->stat.ip_frag++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ecn flag */
    if (IPTOS_ECN(ip->ip_tos)) {
        chain->stat.ip_ecn++;
        return RSC_BYPASS;
    }

    /* Reject lengths too small to hold the headers or larger than the
       actual buffer contents (malformed/hostile packet) */
    ip_len = htons(ip->ip_len);
    if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
        || ip_len > (size - chain->n->guest_hdr_len -
                     sizeof(struct eth_header))) {
        chain->stat.ip_hacked++;
        return RSC_BYPASS;
    }

    return RSC_CANDIDATE;
}
2396
/*
 * RSC entry point for IPv4 packets on @chain: validate the packet,
 * bypass or drain as the TCP control check dictates, otherwise try
 * to coalesce.  Returns the number of bytes consumed.
 */
static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
                                      NetClientState *nc,
                                      const uint8_t *buf, size_t size)
{
    int32_t ret;
    uint16_t hdr_len;
    VirtioNetRscUnit unit;

    hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;

    /* Too short to even contain the headers: deliver unchanged */
    if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
        + sizeof(struct tcp_header))) {
        chain->stat.bypass_not_tcp++;
        return virtio_net_do_receive(nc, buf, size);
    }

    virtio_net_rsc_extract_unit4(chain, buf, &unit);
    if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
        != RSC_CANDIDATE) {
        return virtio_net_do_receive(nc, buf, size);
    }

    ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
    if (ret == RSC_BYPASS) {
        return virtio_net_do_receive(nc, buf, size);
    } else if (ret == RSC_FINAL) {
        /* 12 is the offset of the source address within the IPv4
           header; src+dst (VIRTIO_NET_IP4_ADDR_SIZE) are compared as
           one range, and ports at the start of the TCP header. */
        return virtio_net_rsc_drain_flow(chain, nc, buf, size,
                ((hdr_len + sizeof(struct eth_header)) + 12),
                VIRTIO_NET_IP4_ADDR_SIZE,
                hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
    }

    return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
}
2431
/*
 * Decide whether an IPv6 packet is eligible for coalescing.
 * Returns RSC_CANDIDATE for a plain TCP packet (no extension headers,
 * no ECN) with a consistent payload length; RSC_BYPASS otherwise.
 */
static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
                                            struct ip6_header *ip6,
                                            const uint8_t *buf, size_t size)
{
    uint16_t ip_len;

    if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
        != IP_HEADER_VERSION_6) {
        return RSC_BYPASS;
    }

    /* Both option and protocol is checked in this */
    /* (next-header != TCP means extension headers or a non-TCP payload) */
    if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
        chain->stat.bypass_not_tcp++;
        return RSC_BYPASS;
    }

    /* IPv6 payload length excludes the IPv6 header itself */
    ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    if (ip_len < sizeof(struct tcp_header) ||
        ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
                  - sizeof(struct ip6_header))) {
        chain->stat.ip_hacked++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ecn flag */
    if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
        chain->stat.ip_ecn++;
        return RSC_BYPASS;
    }

    return RSC_CANDIDATE;
}
2465
2466static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
2467 const uint8_t *buf, size_t size)
2468{
2469 int32_t ret;
2470 uint16_t hdr_len;
2471 VirtioNetRscChain *chain;
2472 VirtioNetRscUnit unit;
2473
3d558330 2474 chain = opq;
2974e916
YB
2475 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2476
2477 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
2478 + sizeof(tcp_header))) {
2479 return virtio_net_do_receive(nc, buf, size);
2480 }
2481
2482 virtio_net_rsc_extract_unit6(chain, buf, &unit);
2483 if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
2484 unit.ip, buf, size)) {
2485 return virtio_net_do_receive(nc, buf, size);
2486 }
2487
2488 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2489 if (ret == RSC_BYPASS) {
2490 return virtio_net_do_receive(nc, buf, size);
2491 } else if (ret == RSC_FINAL) {
2492 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2493 ((hdr_len + sizeof(struct eth_header)) + 8),
2494 VIRTIO_NET_IP6_ADDR_SIZE,
2495 hdr_len + sizeof(struct eth_header)
2496 + sizeof(struct ip6_header));
2497 }
2498
2499 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
2500}
2501
2502static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
2503 NetClientState *nc,
2504 uint16_t proto)
2505{
2506 VirtioNetRscChain *chain;
2507
2508 if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
2509 return NULL;
2510 }
2511
2512 QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
2513 if (chain->proto == proto) {
2514 return chain;
2515 }
2516 }
2517
2518 chain = g_malloc(sizeof(*chain));
2519 chain->n = n;
2520 chain->proto = proto;
2521 if (proto == (uint16_t)ETH_P_IP) {
2522 chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
2523 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2524 } else {
2525 chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
2526 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2527 }
2528 chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
2529 virtio_net_rsc_purge, chain);
2530 memset(&chain->stat, 0, sizeof(chain->stat));
2531
2532 QTAILQ_INIT(&chain->buffers);
2533 QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
2534
2535 return chain;
2536}
2537
/*
 * Top-level RSC receive dispatch: pick (or create) the chain for the
 * packet's ethertype and forward to the v4/v6 RSC handler when that
 * protocol's coalescing is enabled; otherwise deliver unchanged.
 */
static ssize_t virtio_net_rsc_receive(NetClientState *nc,
                                      const uint8_t *buf,
                                      size_t size)
{
    uint16_t proto;
    VirtioNetRscChain *chain;
    struct eth_header *eth;
    VirtIONet *n;

    n = qemu_get_nic_opaque(nc);
    /* NOTE(review): the size guard uses host_hdr_len while the eth
       header below is read at guest_hdr_len — looks asymmetric;
       confirm against upstream before changing. */
    if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
        return virtio_net_do_receive(nc, buf, size);
    }

    eth = (struct eth_header *)(buf + n->guest_hdr_len);
    proto = htons(eth->h_proto);

    chain = virtio_net_rsc_lookup_chain(n, nc, proto);
    if (chain) {
        chain->stat.received++;
        if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
            return virtio_net_rsc_receive4(chain, nc, buf, size);
        } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
            return virtio_net_rsc_receive6(chain, nc, buf, size);
        }
    }
    return virtio_net_do_receive(nc, buf, size);
}
2566
2567static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
2568 size_t size)
2569{
2570 VirtIONet *n = qemu_get_nic_opaque(nc);
2571 if ((n->rsc4_enabled || n->rsc6_enabled)) {
2572 return virtio_net_rsc_receive(nc, buf, size);
2573 } else {
2574 return virtio_net_do_receive(nc, buf, size);
2575 }
2576}
2577
0c87e93e 2578static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
6243375f 2579
/*
 * Completion callback for an asynchronous qemu_sendv_packet_async():
 * push the in-flight element back to the guest, re-enable TX
 * notifications and flush any packets that queued up meanwhile.
 * If the flush hits the tx_burst limit, reschedule (bottom half or
 * timer, whichever this queue uses) to finish later.
 */
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret;

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret >= n->tx_burst) {
        /*
         * the flush has been stopped by tx_burst
         * we will not receive notification for the
         * remaining part, so re-schedule
         */
        virtio_queue_set_notification(q->tx_vq, 0);
        if (q->tx_bh) {
            qemu_bh_schedule(q->tx_bh);
        } else {
            timer_mod(q->tx_timer,
                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        }
        q->tx_waiting = 1;
    }
}
2611
/* TX */
/*
 * Drain up to n->tx_burst packets from the TX virtqueue and hand them
 * to the network backend.
 *
 * Returns the number of packets sent, or -EBUSY if the backend could
 * not take a packet (completion will arrive via
 * virtio_net_tx_complete()), or -EINVAL if the guest supplied a
 * malformed buffer (the device is marked broken via virtio_error()).
 */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    /* A send is already in flight; wait for its completion callback */
    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                /* Byteswap the header into a local copy and splice it
                   in front of the remaining guest buffers via sg2 */
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            /* Backend busy: park the element; tx_complete resumes us */
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
2707
7550a822
LV
2708static void virtio_net_tx_timer(void *opaque);
2709
/*
 * TX virtqueue kick handler for the timer-based TX path.
 * First kick arms the mitigation timer; a kick while packets are
 * already pending flushes immediately.
 */
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* Link is down: discard whatever the guest queued */
    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        /* We already have queued packets, immediately flush */
        timer_del(q->tx_timer);
        virtio_net_tx_timer(q);
    } else {
        /* re-arm timer to flush it (and more) on next tick */
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}
2738
/*
 * TX virtqueue kick handler for the bottom-half-based TX path:
 * disable further notifications and defer the flush to q->tx_bh.
 */
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* Link is down: discard whatever the guest queued */
    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* A flush is already scheduled */
    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
2760
/*
 * Mitigation timer callback for the timer-based TX path: flush the
 * queue, and re-arm the timer while the guest keeps producing packets
 * (full bursts, or more work found after re-enabling notifications).
 */
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready on more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        /* Backend busy (tx_complete will resume) or device broken */
        return;
    }
    /*
     * If we flush a full burst of packets, assume there are
     * more coming and immediately rearm
     */
    if (ret >= n->tx_burst) {
        q->tx_waiting = 1;
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        return;
    }
    /*
     * If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking. If
     * we find something, assume the guest is still active and rearm
     */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        q->tx_waiting = 1;
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
    }
}
2810
/*
 * Bottom-half callback for the BH-based TX path: flush the queue, and
 * reschedule while the guest keeps producing packets (full bursts, or
 * more work found after re-enabling notifications).
 */
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready on more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking. If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
2859
/*
 * Create the RX/TX virtqueue pair for queue pair @index and set up the
 * TX completion mechanism: a mitigation timer when the "tx" property
 * is "timer", otherwise a bottom half.
 */
static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}
2884
/*
 * Tear down the RX/TX virtqueue pair for queue pair @index: purge any
 * queued packets and free whichever TX completion mechanism (timer or
 * bottom half) this queue was created with.
 */
static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    /* Virtqueues are laid out as rx0, tx0, rx1, tx1, ..., ctrl */
    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_free(q->tx_timer);
        q->tx_timer = NULL;
    } else {
        qemu_bh_delete(q->tx_bh);
        q->tx_bh = NULL;
    }
    q->tx_waiting = 0;
    virtio_del_queue(vdev, index * 2 + 1);
}
2904
/*
 * Grow or shrink the device to @new_max_queue_pairs RX/TX pairs.
 * The control virtqueue always sits after the last pair, so it is
 * removed first and re-added last to keep the queue indices valid.
 */
static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    /* Each pair contributes rx+tx, plus one ctrl queue */
    int new_num_queues = new_max_queue_pairs * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}
2939
ec57db16 2940static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
fed699f9 2941{
441537f1 2942 int max = multiqueue ? n->max_queue_pairs : 1;
f9d6dbf0 2943
fed699f9 2944 n->multiqueue = multiqueue;
441537f1 2945 virtio_net_change_num_queue_pairs(n, max);
fed699f9 2946
441537f1 2947 virtio_net_set_queue_pairs(n);
fed699f9
JW
2948}
2949
982b78c5 2950static int virtio_net_post_load_device(void *opaque, int version_id)
037dab2f 2951{
982b78c5
DDAG
2952 VirtIONet *n = opaque;
2953 VirtIODevice *vdev = VIRTIO_DEVICE(n);
037dab2f 2954 int i, link_down;
fbe78f4f 2955
9d8c6a25 2956 trace_virtio_net_post_load_device();
982b78c5 2957 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
95129d6f 2958 virtio_vdev_has_feature(vdev,
e22f0603
YB
2959 VIRTIO_F_VERSION_1),
2960 virtio_vdev_has_feature(vdev,
2961 VIRTIO_NET_F_HASH_REPORT));
fbe78f4f 2962
76010cb3 2963 /* MAC_TABLE_ENTRIES may be different from the saved image */
982b78c5 2964 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
76010cb3 2965 n->mac_table.in_use = 0;
b6503ed9 2966 }
0ce0e8f4 2967
982b78c5 2968 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
6c666823
MT
2969 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
2970 }
2971
7788c3f2
MS
2972 /*
2973 * curr_guest_offloads will be later overwritten by the
2974 * virtio_set_features_nocheck call done from the virtio_load.
2975 * Here we make sure it is preserved and restored accordingly
2976 * in the virtio_net_post_load_virtio callback.
2977 */
2978 n->saved_guest_offloads = n->curr_guest_offloads;
6c666823 2979
441537f1 2980 virtio_net_set_queue_pairs(n);
5f800801 2981
2d9aba39
AW
2982 /* Find the first multicast entry in the saved MAC filter */
2983 for (i = 0; i < n->mac_table.in_use; i++) {
2984 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
2985 break;
2986 }
2987 }
2988 n->mac_table.first_multi = i;
98991481
AK
2989
2990 /* nc.link_down can't be migrated, so infer link_down according
2991 * to link status bit in n->status */
5f800801 2992 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
441537f1 2993 for (i = 0; i < n->max_queue_pairs; i++) {
5f800801
JW
2994 qemu_get_subqueue(n->nic, i)->link_down = link_down;
2995 }
98991481 2996
6c666823
MT
2997 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
2998 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
9d8c6a25
DDAG
2999 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3000 QEMU_CLOCK_VIRTUAL,
3001 virtio_net_announce_timer, n);
3002 if (n->announce_timer.round) {
3003 timer_mod(n->announce_timer.tm,
3004 qemu_clock_get_ms(n->announce_timer.type));
3005 } else {
944458b6 3006 qemu_announce_timer_del(&n->announce_timer, false);
9d8c6a25 3007 }
6c666823
MT
3008 }
3009
e41b7114 3010 if (n->rss_data.enabled) {
0145c393
AM
3011 n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
3012 if (!n->rss_data.populate_hash) {
3013 if (!virtio_net_attach_epbf_rss(n)) {
3014 if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
3015 warn_report("Can't post-load eBPF RSS for vhost");
3016 } else {
3017 warn_report("Can't post-load eBPF RSS - "
3018 "fallback to software RSS");
3019 n->rss_data.enabled_software_rss = true;
3020 }
3021 }
3022 }
3023
e41b7114
YB
3024 trace_virtio_net_rss_enable(n->rss_data.hash_types,
3025 n->rss_data.indirections_len,
3026 sizeof(n->rss_data.key));
3027 } else {
3028 trace_virtio_net_rss_disable();
3029 }
fbe78f4f
AL
3030 return 0;
3031}
3032
7788c3f2
MS
3033static int virtio_net_post_load_virtio(VirtIODevice *vdev)
3034{
3035 VirtIONet *n = VIRTIO_NET(vdev);
3036 /*
3037 * The actual needed state is now in saved_guest_offloads,
3038 * see virtio_net_post_load_device for detail.
3039 * Restore it back and apply the desired offloads.
3040 */
3041 n->curr_guest_offloads = n->saved_guest_offloads;
3042 if (peer_has_vnet_hdr(n)) {
3043 virtio_net_apply_guest_offloads(n);
3044 }
3045
3046 return 0;
3047}
3048
982b78c5
DDAG
3049/* tx_waiting field of a VirtIONetQueue */
3050static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
3051 .name = "virtio-net-queue-tx_waiting",
3052 .fields = (VMStateField[]) {
3053 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
3054 VMSTATE_END_OF_LIST()
3055 },
3056};
3057
441537f1 3058static bool max_queue_pairs_gt_1(void *opaque, int version_id)
982b78c5 3059{
441537f1 3060 return VIRTIO_NET(opaque)->max_queue_pairs > 1;
982b78c5
DDAG
3061}
3062
3063static bool has_ctrl_guest_offloads(void *opaque, int version_id)
3064{
3065 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
3066 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3067}
3068
3069static bool mac_table_fits(void *opaque, int version_id)
3070{
3071 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
3072}
3073
3074static bool mac_table_doesnt_fit(void *opaque, int version_id)
3075{
3076 return !mac_table_fits(opaque, version_id);
3077}
3078
3079/* This temporary type is shared by all the WITH_TMP methods
3080 * although only some fields are used by each.
3081 */
3082struct VirtIONetMigTmp {
3083 VirtIONet *parent;
3084 VirtIONetQueue *vqs_1;
441537f1 3085 uint16_t curr_queue_pairs_1;
982b78c5
DDAG
3086 uint8_t has_ufo;
3087 uint32_t has_vnet_hdr;
3088};
3089
3090/* The 2nd and subsequent tx_waiting flags are loaded later than
441537f1 3091 * the 1st entry in the queue_pairs and only if there's more than one
982b78c5
DDAG
3092 * entry. We use the tmp mechanism to calculate a temporary
3093 * pointer and count and also validate the count.
3094 */
3095
44b1ff31 3096static int virtio_net_tx_waiting_pre_save(void *opaque)
982b78c5
DDAG
3097{
3098 struct VirtIONetMigTmp *tmp = opaque;
3099
3100 tmp->vqs_1 = tmp->parent->vqs + 1;
441537f1
JW
3101 tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1;
3102 if (tmp->parent->curr_queue_pairs == 0) {
3103 tmp->curr_queue_pairs_1 = 0;
982b78c5 3104 }
44b1ff31
DDAG
3105
3106 return 0;
982b78c5
DDAG
3107}
3108
3109static int virtio_net_tx_waiting_pre_load(void *opaque)
3110{
3111 struct VirtIONetMigTmp *tmp = opaque;
3112
3113 /* Reuse the pointer setup from save */
3114 virtio_net_tx_waiting_pre_save(opaque);
3115
441537f1
JW
3116 if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
3117 error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
3118 tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
982b78c5
DDAG
3119
3120 return -EINVAL;
3121 }
3122
3123 return 0; /* all good */
3124}
3125
3126static const VMStateDescription vmstate_virtio_net_tx_waiting = {
3127 .name = "virtio-net-tx_waiting",
3128 .pre_load = virtio_net_tx_waiting_pre_load,
3129 .pre_save = virtio_net_tx_waiting_pre_save,
3130 .fields = (VMStateField[]) {
3131 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
441537f1 3132 curr_queue_pairs_1,
982b78c5
DDAG
3133 vmstate_virtio_net_queue_tx_waiting,
3134 struct VirtIONetQueue),
3135 VMSTATE_END_OF_LIST()
3136 },
3137};
3138
3139/* the 'has_ufo' flag is just tested; if the incoming stream has the
3140 * flag set we need to check that we have it
3141 */
3142static int virtio_net_ufo_post_load(void *opaque, int version_id)
3143{
3144 struct VirtIONetMigTmp *tmp = opaque;
3145
3146 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
3147 error_report("virtio-net: saved image requires TUN_F_UFO support");
3148 return -EINVAL;
3149 }
3150
3151 return 0;
3152}
3153
44b1ff31 3154static int virtio_net_ufo_pre_save(void *opaque)
982b78c5
DDAG
3155{
3156 struct VirtIONetMigTmp *tmp = opaque;
3157
3158 tmp->has_ufo = tmp->parent->has_ufo;
44b1ff31
DDAG
3159
3160 return 0;
982b78c5
DDAG
3161}
3162
3163static const VMStateDescription vmstate_virtio_net_has_ufo = {
3164 .name = "virtio-net-ufo",
3165 .post_load = virtio_net_ufo_post_load,
3166 .pre_save = virtio_net_ufo_pre_save,
3167 .fields = (VMStateField[]) {
3168 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
3169 VMSTATE_END_OF_LIST()
3170 },
3171};
3172
3173/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
3174 * flag set we need to check that we have it
3175 */
3176static int virtio_net_vnet_post_load(void *opaque, int version_id)
3177{
3178 struct VirtIONetMigTmp *tmp = opaque;
3179
3180 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
3181 error_report("virtio-net: saved image requires vnet_hdr=on");
3182 return -EINVAL;
3183 }
3184
3185 return 0;
3186}
3187
44b1ff31 3188static int virtio_net_vnet_pre_save(void *opaque)
982b78c5
DDAG
3189{
3190 struct VirtIONetMigTmp *tmp = opaque;
3191
3192 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
44b1ff31
DDAG
3193
3194 return 0;
982b78c5
DDAG
3195}
3196
3197static const VMStateDescription vmstate_virtio_net_has_vnet = {
3198 .name = "virtio-net-vnet",
3199 .post_load = virtio_net_vnet_post_load,
3200 .pre_save = virtio_net_vnet_pre_save,
3201 .fields = (VMStateField[]) {
3202 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
3203 VMSTATE_END_OF_LIST()
3204 },
3205};
3206
e41b7114
YB
3207static bool virtio_net_rss_needed(void *opaque)
3208{
3209 return VIRTIO_NET(opaque)->rss_data.enabled;
3210}
3211
3212static const VMStateDescription vmstate_virtio_net_rss = {
3213 .name = "virtio-net-device/rss",
3214 .version_id = 1,
3215 .minimum_version_id = 1,
3216 .needed = virtio_net_rss_needed,
3217 .fields = (VMStateField[]) {
3218 VMSTATE_BOOL(rss_data.enabled, VirtIONet),
3219 VMSTATE_BOOL(rss_data.redirect, VirtIONet),
3220 VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
3221 VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
3222 VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
3223 VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
3224 VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
3225 VIRTIO_NET_RSS_MAX_KEY_SIZE),
3226 VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
3227 rss_data.indirections_len, 0,
3228 vmstate_info_uint16, uint16_t),
3229 VMSTATE_END_OF_LIST()
3230 },
3231};
3232
982b78c5
DDAG
3233static const VMStateDescription vmstate_virtio_net_device = {
3234 .name = "virtio-net-device",
3235 .version_id = VIRTIO_NET_VM_VERSION,
3236 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3237 .post_load = virtio_net_post_load_device,
3238 .fields = (VMStateField[]) {
3239 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
3240 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
3241 vmstate_virtio_net_queue_tx_waiting,
3242 VirtIONetQueue),
3243 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
3244 VMSTATE_UINT16(status, VirtIONet),
3245 VMSTATE_UINT8(promisc, VirtIONet),
3246 VMSTATE_UINT8(allmulti, VirtIONet),
3247 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
3248
3249 /* Guarded pair: If it fits we load it, else we throw it away
3250 * - can happen if source has a larger MAC table.; post-load
3251 * sets flags in this case.
3252 */
3253 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
3254 0, mac_table_fits, mac_table.in_use,
3255 ETH_ALEN),
3256 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
3257 mac_table.in_use, ETH_ALEN),
3258
3259 /* Note: This is an array of uint32's that's always been saved as a
3260 * buffer; hold onto your endiannesses; it's actually used as a bitmap
3261 * but based on the uint.
3262 */
3263 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
3264 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3265 vmstate_virtio_net_has_vnet),
3266 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
3267 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
3268 VMSTATE_UINT8(alluni, VirtIONet),
3269 VMSTATE_UINT8(nomulti, VirtIONet),
3270 VMSTATE_UINT8(nouni, VirtIONet),
3271 VMSTATE_UINT8(nobcast, VirtIONet),
3272 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3273 vmstate_virtio_net_has_ufo),
441537f1 3274 VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0,
982b78c5 3275 vmstate_info_uint16_equal, uint16_t),
441537f1 3276 VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1),
982b78c5
DDAG
3277 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3278 vmstate_virtio_net_tx_waiting),
3279 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
3280 has_ctrl_guest_offloads),
3281 VMSTATE_END_OF_LIST()
3282 },
e41b7114
YB
3283 .subsections = (const VMStateDescription * []) {
3284 &vmstate_virtio_net_rss,
3285 NULL
3286 }
982b78c5
DDAG
3287};
3288
eb6b6c12 3289static NetClientInfo net_virtio_info = {
f394b2e2 3290 .type = NET_CLIENT_DRIVER_NIC,
eb6b6c12
MM
3291 .size = sizeof(NICState),
3292 .can_receive = virtio_net_can_receive,
3293 .receive = virtio_net_receive,
eb6b6c12 3294 .link_status_changed = virtio_net_set_link_status,
b1be4280 3295 .query_rx_filter = virtio_net_query_rxfilter,
b2c929f0 3296 .announce = virtio_net_announce,
eb6b6c12
MM
3297};
3298
f56a1247
MT
3299static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
3300{
17a0ca55 3301 VirtIONet *n = VIRTIO_NET(vdev);
68b0a639 3302 NetClientState *nc;
f56a1247 3303 assert(n->vhost_started);
68b0a639
SWL
3304 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
3305 /* Must guard against invalid features and bogus queue index
3306 * from being set by malicious guest, or penetrated through
3307 * buggy migration stream.
3308 */
3309 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3310 qemu_log_mask(LOG_GUEST_ERROR,
3311 "%s: bogus vq index ignored\n", __func__);
3312 return false;
3313 }
3314 nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3315 } else {
3316 nc = qemu_get_subqueue(n->nic, vq2q(idx));
3317 }
ed8b4afe 3318 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
f56a1247
MT
3319}
3320
3321static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
3322 bool mask)
3323{
17a0ca55 3324 VirtIONet *n = VIRTIO_NET(vdev);
68b0a639 3325 NetClientState *nc;
f56a1247 3326 assert(n->vhost_started);
68b0a639
SWL
3327 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
3328 /* Must guard against invalid features and bogus queue index
3329 * from being set by malicious guest, or penetrated through
3330 * buggy migration stream.
3331 */
3332 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3333 qemu_log_mask(LOG_GUEST_ERROR,
3334 "%s: bogus vq index ignored\n", __func__);
3335 return;
3336 }
3337 nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3338 } else {
3339 nc = qemu_get_subqueue(n->nic, vq2q(idx));
3340 }
a882b571
MT
3341 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
3342 vdev, idx, mask);
f56a1247
MT
3343}
3344
019a3edb 3345static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
fbe78f4f 3346{
0cd09c3a 3347 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
a93e599d 3348
d74c30c8 3349 n->config_size = virtio_get_config_size(&cfg_size_params, host_features);
17ec5a86
FK
3350}
3351
8a253ec2
FK
3352void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
3353 const char *type)
3354{
3355 /*
3356 * The name can be NULL, the netclient name will be type.x.
3357 */
3358 assert(type != NULL);
3359
9e288406 3360 g_free(n->netclient_name);
9e288406 3361 g_free(n->netclient_type);
80e0090a 3362 n->netclient_name = g_strdup(name);
8a253ec2
FK
3363 n->netclient_type = g_strdup(type);
3364}
3365
0e9a65c5 3366static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev)
9711cd0d
JF
3367{
3368 HotplugHandler *hotplug_ctrl;
3369 PCIDevice *pci_dev;
3370 Error *err = NULL;
3371
0e9a65c5 3372 hotplug_ctrl = qdev_get_hotplug_handler(dev);
9711cd0d 3373 if (hotplug_ctrl) {
0e9a65c5 3374 pci_dev = PCI_DEVICE(dev);
9711cd0d 3375 pci_dev->partially_hotplugged = true;
0e9a65c5 3376 hotplug_handler_unplug_request(hotplug_ctrl, dev, &err);
9711cd0d
JF
3377 if (err) {
3378 error_report_err(err);
3379 return false;
3380 }
3381 } else {
3382 return false;
3383 }
3384 return true;
3385}
3386
0e9a65c5
JQ
3387static bool failover_replug_primary(VirtIONet *n, DeviceState *dev,
3388 Error **errp)
9711cd0d 3389{
5a0948d3 3390 Error *err = NULL;
9711cd0d 3391 HotplugHandler *hotplug_ctrl;
0e9a65c5 3392 PCIDevice *pdev = PCI_DEVICE(dev);
78274682 3393 BusState *primary_bus;
9711cd0d
JF
3394
3395 if (!pdev->partially_hotplugged) {
3396 return true;
3397 }
0e9a65c5 3398 primary_bus = dev->parent_bus;
78274682 3399 if (!primary_bus) {
150ab54a 3400 error_setg(errp, "virtio_net: couldn't find primary bus");
5a0948d3 3401 return false;
9711cd0d 3402 }
0e9a65c5 3403 qdev_set_parent_bus(dev, primary_bus, &error_abort);
e2bde83e 3404 qatomic_set(&n->failover_primary_hidden, false);
0e9a65c5 3405 hotplug_ctrl = qdev_get_hotplug_handler(dev);
150ab54a 3406 if (hotplug_ctrl) {
0e9a65c5 3407 hotplug_handler_pre_plug(hotplug_ctrl, dev, &err);
5a0948d3
MA
3408 if (err) {
3409 goto out;
3410 }
0e9a65c5 3411 hotplug_handler_plug(hotplug_ctrl, dev, &err);
150ab54a 3412 }
109c20ea 3413 pdev->partially_hotplugged = false;
150ab54a
JF
3414
3415out:
5a0948d3
MA
3416 error_propagate(errp, err);
3417 return !err;
9711cd0d
JF
3418}
3419
07a5d816 3420static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s)
9711cd0d
JF
3421{
3422 bool should_be_hidden;
3423 Error *err = NULL;
07a5d816 3424 DeviceState *dev = failover_find_primary_device(n);
9711cd0d 3425
07a5d816
JQ
3426 if (!dev) {
3427 return;
9711cd0d
JF
3428 }
3429
07a5d816
JQ
3430 should_be_hidden = qatomic_read(&n->failover_primary_hidden);
3431
4dbac1ae 3432 if (migration_in_setup(s) && !should_be_hidden) {
07a5d816
JQ
3433 if (failover_unplug_primary(n, dev)) {
3434 vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev);
3435 qapi_event_send_unplug_primary(dev->id);
e2bde83e 3436 qatomic_set(&n->failover_primary_hidden, true);
9711cd0d
JF
3437 } else {
3438 warn_report("couldn't unplug primary device");
3439 }
3440 } else if (migration_has_failed(s)) {
150ab54a 3441 /* We already unplugged the device let's plug it back */
07a5d816 3442 if (!failover_replug_primary(n, dev, &err)) {
9711cd0d
JF
3443 if (err) {
3444 error_report_err(err);
3445 }
3446 }
3447 }
3448}
3449
3450static void virtio_net_migration_state_notifier(Notifier *notifier, void *data)
3451{
3452 MigrationState *s = data;
3453 VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
3454 virtio_net_handle_migration_primary(n, s);
3455}
3456
b91ad981 3457static bool failover_hide_primary_device(DeviceListener *listener,
f3558b1b
KW
3458 const QDict *device_opts,
3459 bool from_json,
3460 Error **errp)
9711cd0d
JF
3461{
3462 VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
4f0303ae 3463 const char *standby_id;
9711cd0d 3464
4d0e59ac 3465 if (!device_opts) {
89631fed 3466 return false;
4d0e59ac 3467 }
bcfc906b
LV
3468
3469 if (!qdict_haskey(device_opts, "failover_pair_id")) {
3470 return false;
3471 }
3472
3473 if (!qdict_haskey(device_opts, "id")) {
3474 error_setg(errp, "Device with failover_pair_id needs to have id");
3475 return false;
3476 }
3477
3478 standby_id = qdict_get_str(device_opts, "failover_pair_id");
89631fed
JQ
3479 if (g_strcmp0(standby_id, n->netclient_name) != 0) {
3480 return false;
9711cd0d
JF
3481 }
3482
7fe7791e
LV
3483 /*
3484 * The hide helper can be called several times for a given device.
3485 * Check there is only one primary for a virtio-net device but
3486 * don't duplicate the qdict several times if it's called for the same
3487 * device.
3488 */
259a10db 3489 if (n->primary_opts) {
7fe7791e
LV
3490 const char *old, *new;
3491 /* devices with failover_pair_id always have an id */
3492 old = qdict_get_str(n->primary_opts, "id");
3493 new = qdict_get_str(device_opts, "id");
3494 if (strcmp(old, new) != 0) {
3495 error_setg(errp, "Cannot attach more than one primary device to "
3496 "'%s': '%s' and '%s'", n->netclient_name, old, new);
3497 return false;
3498 }
3499 } else {
3500 n->primary_opts = qdict_clone_shallow(device_opts);
3501 n->primary_opts_from_json = from_json;
259a10db
KW
3502 }
3503
e2bde83e 3504 /* failover_primary_hidden is set during feature negotiation */
3abad4a2 3505 return qatomic_read(&n->failover_primary_hidden);
9711cd0d
JF
3506}
3507
e6f746b3 3508static void virtio_net_device_realize(DeviceState *dev, Error **errp)
17ec5a86 3509{
e6f746b3 3510 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
284a32f0 3511 VirtIONet *n = VIRTIO_NET(dev);
b1be4280 3512 NetClientState *nc;
284a32f0 3513 int i;
1773d9ee 3514
a93e599d 3515 if (n->net_conf.mtu) {
127833ee 3516 n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
a93e599d
MC
3517 }
3518
9473939e
JB
3519 if (n->net_conf.duplex_str) {
3520 if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
3521 n->net_conf.duplex = DUPLEX_HALF;
3522 } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
3523 n->net_conf.duplex = DUPLEX_FULL;
3524 } else {
3525 error_setg(errp, "'duplex' must be 'half' or 'full'");
843c4cfc 3526 return;
9473939e
JB
3527 }
3528 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3529 } else {
3530 n->net_conf.duplex = DUPLEX_UNKNOWN;
3531 }
3532
3533 if (n->net_conf.speed < SPEED_UNKNOWN) {
3534 error_setg(errp, "'speed' must be between 0 and INT_MAX");
843c4cfc
MA
3535 return;
3536 }
3537 if (n->net_conf.speed >= 0) {
9473939e
JB
3538 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3539 }
3540
9711cd0d 3541 if (n->failover) {
b91ad981 3542 n->primary_listener.hide_device = failover_hide_primary_device;
e2bde83e 3543 qatomic_set(&n->failover_primary_hidden, true);
9711cd0d
JF
3544 device_listener_register(&n->primary_listener);
3545 n->migration_state.notify = virtio_net_migration_state_notifier;
3546 add_migration_state_change_notifier(&n->migration_state);
3547 n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
3548 }
3549
da3e8a23 3550 virtio_net_set_config_size(n, n->host_features);
3857cd5c 3551 virtio_init(vdev, VIRTIO_ID_NET, n->config_size);
fbe78f4f 3552
1c0fbfa3
MT
3553 /*
3554 * We set a lower limit on RX queue size to what it always was.
3555 * Guests that want a smaller ring can always resize it without
3556 * help from us (using virtio 1 and up).
3557 */
3558 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
3559 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
5f997fd1 3560 !is_power_of_2(n->net_conf.rx_queue_size)) {
1c0fbfa3
MT
3561 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
3562 "must be a power of 2 between %d and %d.",
3563 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
3564 VIRTQUEUE_MAX_SIZE);
3565 virtio_cleanup(vdev);
3566 return;
3567 }
3568
9b02e161
WW
3569 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
3570 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
3571 !is_power_of_2(n->net_conf.tx_queue_size)) {
3572 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
3573 "must be a power of 2 between %d and %d",
3574 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
3575 VIRTQUEUE_MAX_SIZE);
3576 virtio_cleanup(vdev);
3577 return;
3578 }
3579
22288fe5
JW
3580 n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
3581
3582 /*
3583 * Figure out the datapath queue pairs since the backend could
3584 * provide control queue via peers as well.
3585 */
3586 if (n->nic_conf.peers.queues) {
3587 for (i = 0; i < n->max_ncs; i++) {
3588 if (n->nic_conf.peers.ncs[i]->is_datapath) {
3589 ++n->max_queue_pairs;
3590 }
3591 }
3592 }
3593 n->max_queue_pairs = MAX(n->max_queue_pairs, 1);
3594
441537f1 3595 if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) {
22288fe5 3596 error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), "
631b22ea 3597 "must be a positive integer less than %d.",
441537f1 3598 n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2);
7e0e736e
JW
3599 virtio_cleanup(vdev);
3600 return;
3601 }
b21e2380 3602 n->vqs = g_new0(VirtIONetQueue, n->max_queue_pairs);
441537f1 3603 n->curr_queue_pairs = 1;
1773d9ee 3604 n->tx_timeout = n->net_conf.txtimer;
a697a334 3605
1773d9ee
FK
3606 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
3607 && strcmp(n->net_conf.tx, "bh")) {
0765691e
MA
3608 warn_report("virtio-net: "
3609 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
3610 n->net_conf.tx);
3611 error_printf("Defaulting to \"bh\"");
a697a334
AW
3612 }
3613
2eef278b
MT
3614 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
3615 n->net_conf.tx_queue_size);
9b02e161 3616
441537f1 3617 for (i = 0; i < n->max_queue_pairs; i++) {
f9d6dbf0 3618 virtio_net_add_queue(n, i);
a697a334 3619 }
da51a335 3620
17a0ca55 3621 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1773d9ee
FK
3622 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
3623 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
554c97dd 3624 n->status = VIRTIO_NET_S_LINK_UP;
9d8c6a25
DDAG
3625 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3626 QEMU_CLOCK_VIRTUAL,
3627 virtio_net_announce_timer, n);
b2c929f0 3628 n->announce_timer.round = 0;
fbe78f4f 3629
8a253ec2
FK
3630 if (n->netclient_type) {
3631 /*
3632 * Happen when virtio_net_set_netclient_name has been called.
3633 */
3634 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3635 n->netclient_type, n->netclient_name, n);
3636 } else {
3637 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
284a32f0 3638 object_get_typename(OBJECT(dev)), dev->id, n);
8a253ec2
FK
3639 }
3640
441537f1 3641 for (i = 0; i < n->max_queue_pairs; i++) {
d4c62930
BM
3642 n->nic->ncs[i].do_not_pad = true;
3643 }
3644
6e371ab8
MT
3645 peer_test_vnet_hdr(n);
3646 if (peer_has_vnet_hdr(n)) {
441537f1 3647 for (i = 0; i < n->max_queue_pairs; i++) {
d6085e3a 3648 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
fed699f9 3649 }
6e371ab8
MT
3650 n->host_hdr_len = sizeof(struct virtio_net_hdr);
3651 } else {
3652 n->host_hdr_len = 0;
3653 }
eb6b6c12 3654
1773d9ee 3655 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
96d5e201 3656
fed699f9 3657 n->vqs[0].tx_waiting = 0;
1773d9ee 3658 n->tx_burst = n->net_conf.txburst;
e22f0603 3659 virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
002437cd 3660 n->promisc = 1; /* for compatibility */
fbe78f4f 3661
7267c094 3662 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
b6503ed9 3663
7267c094 3664 n->vlans = g_malloc0(MAX_VLAN >> 3);
f21c0ed9 3665
b1be4280
AK
3666 nc = qemu_get_queue(n->nic);
3667 nc->rxfilter_notify_enabled = 1;
3668
e87936ea
CL
3669 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
3670 struct virtio_net_config netcfg = {};
3671 memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
3672 vhost_net_set_config(get_vhost_net(nc->peer),
3673 (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER);
3674 }
2974e916 3675 QTAILQ_INIT(&n->rsc_chains);
284a32f0 3676 n->qdev = dev;
4474e37a
YB
3677
3678 net_rx_pkt_init(&n->rx_pkt, false);
0145c393
AM
3679
3680 if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3681 virtio_net_load_ebpf(n);
3682 }
17ec5a86
FK
3683}
3684
b69c3c21 3685static void virtio_net_device_unrealize(DeviceState *dev)
17ec5a86 3686{
306ec6c3
AF
3687 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3688 VirtIONet *n = VIRTIO_NET(dev);
441537f1 3689 int i, max_queue_pairs;
17ec5a86 3690
0145c393
AM
3691 if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3692 virtio_net_unload_ebpf(n);
3693 }
3694
17ec5a86
FK
3695 /* This will stop vhost backend if appropriate. */
3696 virtio_net_set_status(vdev, 0);
3697
9e288406
MA
3698 g_free(n->netclient_name);
3699 n->netclient_name = NULL;
3700 g_free(n->netclient_type);
3701 n->netclient_type = NULL;
8a253ec2 3702
17ec5a86
FK
3703 g_free(n->mac_table.macs);
3704 g_free(n->vlans);
3705
9711cd0d 3706 if (n->failover) {
f3558b1b 3707 qobject_unref(n->primary_opts);
65018100 3708 device_listener_unregister(&n->primary_listener);
1e157667 3709 remove_migration_state_change_notifier(&n->migration_state);
f3558b1b
KW
3710 } else {
3711 assert(n->primary_opts == NULL);
9711cd0d
JF
3712 }
3713
441537f1
JW
3714 max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
3715 for (i = 0; i < max_queue_pairs; i++) {
f9d6dbf0 3716 virtio_net_del_queue(n, i);
17ec5a86 3717 }
d945d9f1 3718 /* delete also control vq */
441537f1 3719 virtio_del_queue(vdev, max_queue_pairs * 2);
944458b6 3720 qemu_announce_timer_del(&n->announce_timer, false);
17ec5a86
FK
3721 g_free(n->vqs);
3722 qemu_del_nic(n->nic);
2974e916 3723 virtio_net_rsc_cleanup(n);
59079029 3724 g_free(n->rss_data.indirections_table);
4474e37a 3725 net_rx_pkt_uninit(n->rx_pkt);
6a1a8cc7 3726 virtio_cleanup(vdev);
17ec5a86
FK
3727}
3728
3729static void virtio_net_instance_init(Object *obj)
3730{
3731 VirtIONet *n = VIRTIO_NET(obj);
3732
3733 /*
3734 * The default config_size is sizeof(struct virtio_net_config).
3735 * Can be overriden with virtio_net_set_config_size.
3736 */
3737 n->config_size = sizeof(struct virtio_net_config);
aa4197c3
GA
3738 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
3739 "bootindex", "/ethernet-phy@0",
40c2281c 3740 DEVICE(n));
0145c393
AM
3741
3742 ebpf_rss_init(&n->ebpf_rss);
17ec5a86
FK
3743}
3744
44b1ff31 3745static int virtio_net_pre_save(void *opaque)
4d45dcfb
HP
3746{
3747 VirtIONet *n = opaque;
3748
3749 /* At this point, backend must be stopped, otherwise
3750 * it might keep writing to memory. */
3751 assert(!n->vhost_started);
44b1ff31
DDAG
3752
3753 return 0;
4d45dcfb
HP
3754}
3755
9711cd0d
JF
3756static bool primary_unplug_pending(void *opaque)
3757{
3758 DeviceState *dev = opaque;
21e8709b 3759 DeviceState *primary;
9711cd0d
JF
3760 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3761 VirtIONet *n = VIRTIO_NET(vdev);
3762
284f42a5
JF
3763 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3764 return false;
3765 }
21e8709b
JQ
3766 primary = failover_find_primary_device(n);
3767 return primary ? primary->pending_deleted_event : false;
9711cd0d
JF
3768}
3769
3770static bool dev_unplug_pending(void *opaque)
3771{
3772 DeviceState *dev = opaque;
3773 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3774
3775 return vdc->primary_unplug_pending(dev);
3776}
3777
c255488d
JP
3778static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
3779{
3780 VirtIONet *n = VIRTIO_NET(vdev);
3781 NetClientState *nc = qemu_get_queue(n->nic);
3782 struct vhost_net *net = get_vhost_net(nc->peer);
3783 return &net->dev;
3784}
3785
4d45dcfb
HP
3786static const VMStateDescription vmstate_virtio_net = {
3787 .name = "virtio-net",
3788 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3789 .version_id = VIRTIO_NET_VM_VERSION,
3790 .fields = (VMStateField[]) {
3791 VMSTATE_VIRTIO_DEVICE,
3792 VMSTATE_END_OF_LIST()
3793 },
3794 .pre_save = virtio_net_pre_save,
9711cd0d 3795 .dev_unplug_pending = dev_unplug_pending,
4d45dcfb 3796};
290c2428 3797
17ec5a86 3798static Property virtio_net_properties[] = {
127833ee
JB
3799 DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
3800 VIRTIO_NET_F_CSUM, true),
3801 DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
87108bb2 3802 VIRTIO_NET_F_GUEST_CSUM, true),
127833ee
JB
3803 DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
3804 DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
87108bb2 3805 VIRTIO_NET_F_GUEST_TSO4, true),
127833ee 3806 DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
87108bb2 3807 VIRTIO_NET_F_GUEST_TSO6, true),
127833ee 3808 DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
87108bb2 3809 VIRTIO_NET_F_GUEST_ECN, true),
127833ee 3810 DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
87108bb2 3811 VIRTIO_NET_F_GUEST_UFO, true),
127833ee 3812 DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
87108bb2 3813 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
127833ee 3814 DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
87108bb2 3815 VIRTIO_NET_F_HOST_TSO4, true),
127833ee 3816 DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
87108bb2 3817 VIRTIO_NET_F_HOST_TSO6, true),
127833ee 3818 DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
87108bb2 3819 VIRTIO_NET_F_HOST_ECN, true),
127833ee 3820 DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
87108bb2 3821 VIRTIO_NET_F_HOST_UFO, true),
127833ee 3822 DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
87108bb2 3823 VIRTIO_NET_F_MRG_RXBUF, true),
127833ee 3824 DEFINE_PROP_BIT64("status", VirtIONet, host_features,
87108bb2 3825 VIRTIO_NET_F_STATUS, true),
127833ee 3826 DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
87108bb2 3827 VIRTIO_NET_F_CTRL_VQ, true),
127833ee 3828 DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
87108bb2 3829 VIRTIO_NET_F_CTRL_RX, true),
127833ee 3830 DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
87108bb2 3831 VIRTIO_NET_F_CTRL_VLAN, true),
127833ee 3832 DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
87108bb2 3833 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
127833ee 3834 DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
87108bb2 3835 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
127833ee 3836 DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
87108bb2 3837 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
127833ee 3838 DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
59079029
YB
3839 DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
3840 VIRTIO_NET_F_RSS, false),
e22f0603
YB
3841 DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
3842 VIRTIO_NET_F_HASH_REPORT, false),
2974e916
YB
3843 DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
3844 VIRTIO_NET_F_RSC_EXT, false),
3845 DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
3846 VIRTIO_NET_RSC_DEFAULT_INTERVAL),
17ec5a86
FK
3847 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
3848 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
87108bb2 3849 TX_TIMER_INTERVAL),
17ec5a86
FK
3850 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
3851 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
1c0fbfa3
MT
3852 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
3853 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
9b02e161
WW
3854 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
3855 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
a93e599d 3856 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
75ebec11
MC
3857 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
3858 true),
9473939e
JB
3859 DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
3860 DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
9711cd0d 3861 DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
17ec5a86
FK
3862 DEFINE_PROP_END_OF_LIST(),
3863};
3864
3865static void virtio_net_class_init(ObjectClass *klass, void *data)
3866{
3867 DeviceClass *dc = DEVICE_CLASS(klass);
3868 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
e6f746b3 3869
4f67d30b 3870 device_class_set_props(dc, virtio_net_properties);
290c2428 3871 dc->vmsd = &vmstate_virtio_net;
125ee0ed 3872 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
e6f746b3 3873 vdc->realize = virtio_net_device_realize;
306ec6c3 3874 vdc->unrealize = virtio_net_device_unrealize;
17ec5a86
FK
3875 vdc->get_config = virtio_net_get_config;
3876 vdc->set_config = virtio_net_set_config;
3877 vdc->get_features = virtio_net_get_features;
3878 vdc->set_features = virtio_net_set_features;
3879 vdc->bad_features = virtio_net_bad_features;
3880 vdc->reset = virtio_net_reset;
7dc6be52 3881 vdc->queue_reset = virtio_net_queue_reset;
7f863302 3882 vdc->queue_enable = virtio_net_queue_enable;
17ec5a86
FK
3883 vdc->set_status = virtio_net_set_status;
3884 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
3885 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2a083ffd 3886 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
7788c3f2 3887 vdc->post_load = virtio_net_post_load_virtio;
982b78c5 3888 vdc->vmsd = &vmstate_virtio_net_device;
9711cd0d 3889 vdc->primary_unplug_pending = primary_unplug_pending;
c255488d 3890 vdc->get_vhost = virtio_net_get_vhost;
17ec5a86
FK
3891}
3892
3893static const TypeInfo virtio_net_info = {
3894 .name = TYPE_VIRTIO_NET,
3895 .parent = TYPE_VIRTIO_DEVICE,
3896 .instance_size = sizeof(VirtIONet),
3897 .instance_init = virtio_net_instance_init,
3898 .class_init = virtio_net_class_init,
3899};
3900
/* Register the virtio-net type with the QOM type system at startup. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)