/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
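
/* With a weight reciprocal of 64, each new sample only moves the average
 * by 1/64th of the difference: avg' = avg + (pkt_len - avg) / 64.
 */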

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
        VIRTIO_NET_F_GUEST_UFO
};

struct virtnet_stats {
        struct u64_stats_sync tx_syncp;
        struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;

        u64 rx_bytes;
        u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send_queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[40];

        struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        struct bpf_prog __rcu *xdp_prog;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* Average packet length for mergeable receive buffers. */
        struct ewma_pkt_len mrg_avg_pkt_len;

        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Min single buffer size for mergeable buffers case. */
        unsigned int min_buf_len;

        /* Name of this receive queue: input.$index */
        char name[40];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
        struct virtio_net_ctrl_hdr hdr;
        virtio_net_ctrl_ack status;
        struct virtio_net_ctrl_mq mq;
        u8 promisc;
        u8 allmulti;
        __virtio16 vid;
        u64 offloads;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* # of XDP queue pairs currently used by the driver */
        u16 xdp_queue_pairs;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* Packet virtio header size */
        u8 hdr_len;

        /* Active statistics */
        struct virtnet_stats __percpu *stats;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Is the affinity hint set for virtqueues? */
        bool affinity_hint_set;

        /* CPU hotplug instances for online & dead */
        struct hlist_node node;
        struct hlist_node node_dead;

        struct control_buf *ctrl;

        /* Ethtool settings */
        u8 duplex;
        u32 speed;

        unsigned long guest_offloads;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr_mrg_rxbuf hdr;
        /*
         * hdr is in a separate sg buffer, and data sg buffer shares same page
         * with this header sg. This padding makes next sg 16 byte aligned
         * after the header.
         */
        char padding[4];
};
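
/* struct virtio_net_hdr_mrg_rxbuf is 12 bytes (the 10-byte virtio_net_hdr
 * plus the 16-bit num_buffers field), so the 4 bytes of padding above round
 * the header sg entry up to 16 bytes.
 */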

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}
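
/* For example, with two queue pairs the device exposes five virtqueues
 * (rx0, tx0, rx1, tx1, cvq), so txq2vq(1) == 3 and vq2rxq() maps
 * virtqueue index 2 back to rx queue 1.
 */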

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
{
        if (napi_schedule_prep(napi)) {
                virtqueue_disable_cb(vq);
                __napi_schedule(napi);
        }
}

static void virtqueue_napi_complete(struct napi_struct *napi,
                                    struct virtqueue *vq, int processed)
{
        int opaque;

        opaque = virtqueue_enable_cb_prepare(vq);
        if (napi_complete_done(napi, processed)) {
                if (unlikely(virtqueue_poll(vq, opaque)))
                        virtqueue_napi_schedule(napi, vq);
        } else {
                virtqueue_disable_cb(vq);
        }
}
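
/* Note the token from virtqueue_enable_cb_prepare() is only checked with
 * virtqueue_poll() once napi_complete_done() has succeeded: if buffers
 * arrived in that window, polling is rescheduled rather than the event
 * being lost.
 */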

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;
        struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        if (napi->weight)
                virtqueue_napi_schedule(napi, vq);
        else
                /* We were probably waiting for more output buffers. */
                netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
                                  unsigned int headroom)
{
        return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
        return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
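
/* The mergeable context is a pointer-sized scalar, not a pointer: the low
 * 22 bits carry the buffer truesize (up to 4 MB) and the bits above
 * MRG_CTX_HEADER_SHIFT carry the headroom. E.g. a 1536-byte buffer with
 * 256 bytes of headroom is encoded as (256 << 22) | 1536.
 */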

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize)
{
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        char *p;

        p = page_address(page) + offset;

        /* copy small packet so we can reuse these pages for small data */
        skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        hdr = skb_vnet_hdr(skb);

        hdr_len = vi->hdr_len;
        if (vi->mergeable_rx_bufs)
                hdr_padded_len = sizeof(*hdr);
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);

        memcpy(hdr, p, hdr_len);

        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;

        copy = len;
        if (copy > skb_tailroom(skb))
                copy = skb_tailroom(skb);
        skb_put_data(skb, p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        put_page(page);
                return skb;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

        return skb;
}

static void virtnet_xdp_flush(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct send_queue *sq;
        unsigned int qp;

        qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
        sq = &vi->sq[qp];

        virtqueue_kick(sq->vq);
}

static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
                               struct xdp_buff *xdp)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int len;
        struct send_queue *sq;
        unsigned int qp;
        void *xdp_sent;
        int err;

        qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
        sq = &vi->sq[qp];

        /* Free up any pending old buffers before queueing new ones. */
        while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                struct page *sent_page = virt_to_head_page(xdp_sent);

                put_page(sent_page);
        }

        xdp->data -= vi->hdr_len;
        /* Zero header and leave csum up to XDP layers */
        hdr = xdp->data;
        memset(hdr, 0, vi->hdr_len);

        sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);

        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
        if (unlikely(err)) {
                struct page *page = virt_to_head_page(xdp->data);

                put_page(page);
                return false;
        }

        return true;
}
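
/* XDP transmit queues are the last xdp_queue_pairs entries of vi->sq[].
 * Indexing with curr_queue_pairs - xdp_queue_pairs + smp_processor_id()
 * gives each cpu a queue of its own, which is why no tx lock is taken
 * around the virtqueue operations above.
 */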

static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
        struct virtnet_info *vi = netdev_priv(dev);
        bool sent = __virtnet_xdp_xmit(vi, xdp);

        if (!sent)
                return -ENOSPC;
        return 0;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
        return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets. Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       u16 *num_buf,
                                       struct page *p,
                                       int offset,
                                       int page_off,
                                       unsigned int *len)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return NULL;

        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;

        while (--*num_buf) {
                unsigned int buflen;
                void *buf;
                int off;

                buf = virtqueue_get_buf(rq->vq, &buflen);
                if (unlikely(!buf))
                        goto err_buf;

                p = virt_to_head_page(buf);
                off = buf - page_address(p);

                /* guard against a misconfigured or uncooperative backend that
                 * is sending packet larger than the MTU.
                 */
                if ((page_off + buflen) > PAGE_SIZE) {
                        put_page(p);
                        goto err_buf;
                }

                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
                put_page(p);
        }

        /* Headroom does not contribute to packet length */
        *len = page_off - VIRTIO_XDP_HEADROOM;
        return page;
err_buf:
        __free_pages(page, 0);
        return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
                                     struct virtnet_info *vi,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
                                     bool *xdp_xmit)
{
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
        unsigned int xdp_headroom = (unsigned long)ctx;
        unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
        unsigned int headroom = vi->hdr_len + header_offset;
        unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page = virt_to_head_page(buf);
        unsigned int delta = 0, err;
        struct page *xdp_page;
        len -= vi->hdr_len;

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
                struct xdp_buff xdp;
                void *orig_data;
                u32 act;

                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;

                if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
                        int offset = buf - page_address(page) + header_offset;
                        unsigned int tlen = len + vi->hdr_len;
                        u16 num_buf = 1;

                        xdp_headroom = virtnet_get_headroom(vi);
                        header_offset = VIRTNET_RX_PAD + xdp_headroom;
                        headroom = vi->hdr_len + header_offset;
                        buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                        xdp_page = xdp_linearize_page(rq, &num_buf, page,
                                                      offset, header_offset,
                                                      &tlen);
                        if (!xdp_page)
                                goto err_xdp;

                        buf = page_address(xdp_page);
                        put_page(page);
                        page = xdp_page;
                }

                xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
                xdp.data = xdp.data_hard_start + xdp_headroom;
                xdp_set_data_meta_invalid(&xdp);
                xdp.data_end = xdp.data + len;
                orig_data = xdp.data;
                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                switch (act) {
                case XDP_PASS:
                        /* Recalculate length in case bpf program changed it */
                        delta = orig_data - xdp.data;
                        break;
                case XDP_TX:
                        if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                        else
                                *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (!err)
                                *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                case XDP_ABORTED:
                        trace_xdp_exception(vi->dev, xdp_prog, act);
                case XDP_DROP:
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        skb = build_skb(buf, buflen);
        if (!skb) {
                put_page(page);
                goto err;
        }
        skb_reserve(skb, headroom - delta);
        skb_put(skb, len + delta);
        if (!delta) {
                buf += header_offset;
                memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
        } /* keep zeroed vnet hdr since packet was changed by bpf */

err:
        return skb;

err_xdp:
        rcu_read_unlock();
        dev->stats.rx_dropped++;
        put_page(page);
xdp_xmit:
        return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len)
{
        struct page *page = buf;
        struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

        if (unlikely(!skb))
                goto err;

        return skb;

err:
        dev->stats.rx_dropped++;
        give_pages(rq, page);
        return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct virtnet_info *vi,
                                         struct receive_queue *rq,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
                                         bool *xdp_xmit)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
        struct page *page = virt_to_head_page(buf);
        int offset = buf - page_address(page);
        struct sk_buff *head_skb, *curr_skb;
        struct bpf_prog *xdp_prog;
        unsigned int truesize;
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
        int err;

        head_skb = NULL;

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct page *xdp_page;
                struct xdp_buff xdp;
                void *data;
                u32 act;

                /* Transient failure which in theory could occur if
                 * in-flight packets from before XDP was enabled reach
                 * the receive path after XDP is loaded.
                 */
                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;

                /* This happens when rx buffer size is underestimated */
                if (unlikely(num_buf > 1 ||
                             headroom < virtnet_get_headroom(vi))) {
                        /* linearize data for XDP */
                        xdp_page = xdp_linearize_page(rq, &num_buf,
                                                      page, offset,
                                                      VIRTIO_XDP_HEADROOM,
                                                      &len);
                        if (!xdp_page)
                                goto err_xdp;
                        offset = VIRTIO_XDP_HEADROOM;
                } else {
                        xdp_page = page;
                }

                /* Allow consuming headroom but reserve enough space to push
                 * the descriptor on if we get an XDP_TX return code.
                 */
                data = page_address(xdp_page) + offset;
                xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
                xdp.data = data + vi->hdr_len;
                xdp_set_data_meta_invalid(&xdp);
                xdp.data_end = xdp.data + (len - vi->hdr_len);
                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                if (act != XDP_PASS)
                        ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);

                switch (act) {
                case XDP_PASS:
                        /* recalculate offset to account for any header
                         * adjustments. Note other cases do not build an
                         * skb and avoid using offset
                         */
                        offset = xdp.data -
                                 page_address(xdp_page) - vi->hdr_len;

                        /* We can only create skb based on xdp_page. */
                        if (unlikely(xdp_page != page)) {
                                rcu_read_unlock();
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page,
                                                       offset, len, PAGE_SIZE);
                                return head_skb;
                        }
                        break;
                case XDP_TX:
                        if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                        else
                                *xdp_xmit = true;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (!err)
                                *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                case XDP_ABORTED:
                        trace_xdp_exception(vi->dev, xdp_prog, act);
                case XDP_DROP:
                        if (unlikely(xdp_page != page))
                                __free_pages(xdp_page, 0);
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        truesize = mergeable_ctx_to_truesize(ctx);
        if (unlikely(len > truesize)) {
                pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                         dev->name, len, (unsigned long)ctx);
                dev->stats.rx_length_errors++;
                goto err_skb;
        }

        head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
        curr_skb = head_skb;

        if (unlikely(!curr_skb))
                goto err_skb;
        while (--num_buf) {
                int num_skb_frags;

                buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf,
                                 virtio16_to_cpu(vi->vdev,
                                                 hdr->num_buffers));
                        dev->stats.rx_length_errors++;
                        goto err_buf;
                }

                page = virt_to_head_page(buf);

                truesize = mergeable_ctx_to_truesize(ctx);
                if (unlikely(len > truesize)) {
                        pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
                                 dev->name, len, (unsigned long)ctx);
                        dev->stats.rx_length_errors++;
                        goto err_skb;
                }

                num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
                if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
                        struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

                        if (unlikely(!nskb))
                                goto err_skb;
                        if (curr_skb == head_skb)
                                skb_shinfo(curr_skb)->frag_list = nskb;
                        else
                                curr_skb->next = nskb;
                        curr_skb = nskb;
                        head_skb->truesize += nskb->truesize;
                        num_skb_frags = 0;
                }
                if (curr_skb != head_skb) {
                        head_skb->data_len += len;
                        head_skb->len += len;
                        head_skb->truesize += truesize;
                }
                offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
                                             len, truesize);
                } else {
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
                                        offset, len, truesize);
                }
        }

        ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
        return head_skb;

err_xdp:
        rcu_read_unlock();
err_skb:
        put_page(page);
        while (num_buf-- > 1) {
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        dev->stats.rx_length_errors++;
                        break;
                }
                page = virt_to_head_page(buf);
                put_page(page);
        }
err_buf:
        dev->stats.rx_dropped++;
        dev_kfree_skb(head_skb);
xdp_xmit:
        return NULL;
}

static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        int ret;

        if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs) {
                        put_page(virt_to_head_page(buf));
                } else if (vi->big_packets) {
                        give_pages(rq, buf);
                } else {
                        put_page(virt_to_head_page(buf));
                }
                return 0;
        }

        if (vi->mergeable_rx_bufs)
                skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
        else if (vi->big_packets)
                skb = receive_big(dev, vi, rq, buf, len);
        else
                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);

        if (unlikely(!skb))
                return 0;

        hdr = skb_vnet_hdr(skb);

        ret = skb->len;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
                                  virtio_is_little_endian(vi->vdev))) {
                net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
                                     dev->name, hdr->hdr.gso_type,
                                     hdr->hdr.gso_size);
                goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        napi_gro_receive(&rq->napi, skb);
        return ret;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
        return 0;
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
                             gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        char *buf;
        unsigned int xdp_headroom = virtnet_get_headroom(vi);
        void *ctx = (void *)(unsigned long)xdp_headroom;
        int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
        int err;

        len = SKB_DATA_ALIGN(len) +
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
                return -ENOMEM;

        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
        get_page(alloc_frag->page);
        alloc_frag->offset += len;
        sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
                    vi->hdr_len + GOOD_PACKET_LEN);
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));
        return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
                           gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

        /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(rq, gfp);
                if (!first) {
                        if (list)
                                give_pages(rq, list);
                        return -ENOMEM;
                }
                sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(rq, gfp);
        if (!first) {
                give_pages(rq, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* rq->sg[0], rq->sg[1] share the same page */
        /* a separated rq->sg[0] for header - required in case !any_header_sg */
        sg_set_buf(&rq->sg[0], p, vi->hdr_len);

        /* rq->sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
                                  first, gfp);
        if (err < 0)
                give_pages(rq, first);

        return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
                                          struct ewma_pkt_len *avg_pkt_len)
{
        const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        unsigned int len;

        len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
                                rq->min_buf_len, PAGE_SIZE - hdr_len);
        return ALIGN(len, L1_CACHE_BYTES);
}
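
/* The average is first clamped to [min_buf_len, PAGE_SIZE - hdr_len].
 * E.g. with the 12-byte mergeable header and an average packet length
 * of 1500 bytes this returns ALIGN(12 + 1500, 64) = 1536 on a machine
 * with 64-byte cache lines.
 */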

static int add_recvbuf_mergeable(struct virtnet_info *vi,
                                 struct receive_queue *rq, gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        unsigned int headroom = virtnet_get_headroom(vi);
        char *buf;
        void *ctx;
        int err;
        unsigned int len, hole;

        len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
        if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
                return -ENOMEM;

        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
        buf += headroom; /* advance address leaving hole at front of pkt */
        get_page(alloc_frag->page);
        alloc_frag->offset += len + headroom;
        hole = alloc_frag->size - alloc_frag->offset;
        if (hole < len + headroom) {
                /* To avoid internal fragmentation, if there is very likely not
                 * enough space for another buffer, add the remaining space to
                 * the current buffer.
                 */
                len += hole;
                alloc_frag->offset += hole;
        }

        sg_init_one(rq->sg, buf, len);
        ctx = mergeable_len_to_ctx(len, headroom);
        err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));

        return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                          gfp_t gfp)
{
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(vi, rq, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, rq, gfp);
                else
                        err = add_recvbuf_small(vi, rq, gfp);

                oom = err == -ENOMEM;
                if (err)
                        break;
        } while (rq->vq->num_free);
        virtqueue_kick(rq->vq);
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

        virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
        napi_enable(napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets now.
         * Call local_bh_enable after to trigger softIRQ processing.
         */
        local_bh_disable();
        virtqueue_napi_schedule(napi, vq);
        local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
                                   struct virtqueue *vq,
                                   struct napi_struct *napi)
{
        if (!napi->weight)
                return;

        /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
         * enable the feature if this is likely affine with the transmit path.
         */
        if (!vi->affinity_hint_set) {
                napi->weight = 0;
                return;
        }

        return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
        if (napi->weight)
                napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, refill.work);
        bool still_empty;
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];

                napi_disable(&rq->napi);
                still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
                virtnet_napi_enable(rq->vq, &rq->napi);

                /* In theory, this can happen: if we don't get any buffers in
                 * we will *never* try to fill again.
                 */
                if (still_empty)
                        schedule_delayed_work(&vi->refill, HZ/2);
        }
}

static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0, bytes = 0;
        void *buf;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

        if (!vi->big_packets || vi->mergeable_rx_bufs) {
                void *ctx;

                while (received < budget &&
                       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
                        bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
                        received++;
                }
        } else {
                while (received < budget &&
                       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                        bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
                        received++;
                }
        }

        if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
                if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += bytes;
        stats->rx_packets += received;
        u64_stats_update_end(&stats->rx_syncp);

        return received;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
        struct sk_buff *skb;
        unsigned int len;
        struct virtnet_info *vi = sq->vq->vdev->priv;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
        unsigned int packets = 0;
        unsigned int bytes = 0;

        while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);

                bytes += skb->len;
                packets++;

                dev_consume_skb_any(skb);
        }

        /* Avoid overhead when no packets have been processed; this
         * happens when called speculatively from start_xmit.
         */
        if (!packets)
                return;

        u64_stats_update_begin(&stats->tx_syncp);
        stats->tx_bytes += bytes;
        stats->tx_packets += packets;
        u64_stats_update_end(&stats->tx_syncp);
}
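
/* All callers of free_old_xmit_skbs() serialize on the netdev tx queue
 * lock: start_xmit() runs with it held by the stack, and the napi paths
 * take it explicitly before cleaning.
 */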

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int index = vq2rxq(rq->vq);
        struct send_queue *sq = &vi->sq[index];
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

        if (!sq->napi.weight)
                return;

        if (__netif_tx_trylock(txq)) {
                free_old_xmit_skbs(sq);
                __netif_tx_unlock(txq);
        }

        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
                netif_tx_wake_queue(txq);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct receive_queue *rq =
                container_of(napi, struct receive_queue, napi);
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct send_queue *sq;
        unsigned int received, qp;
        bool xdp_xmit = false;

        virtnet_poll_cleantx(rq);

        received = virtnet_receive(rq, budget, &xdp_xmit);

        /* Out of packets? */
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);

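        /* Kick the XDP tx virtqueue and flush the redirect maps only once
         * per poll, batching any XDP_TX/XDP_REDIRECT work done under the
         * budget.
         */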
        if (xdp_xmit) {
                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                     smp_processor_id();
                sq = &vi->sq[qp];
                virtqueue_kick(sq->vq);
                xdp_do_flush_map();
        }

        return received;
}

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Make sure we have some buffers: if oom use wq. */
                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);
                virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
                virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
        }

        return 0;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
        struct send_queue *sq = container_of(napi, struct send_queue, napi);
        struct virtnet_info *vi = sq->vq->vdev->priv;
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

        __netif_tx_lock(txq, raw_smp_processor_id());
        free_old_xmit_skbs(sq);
        __netif_tx_unlock(txq);

        virtqueue_napi_complete(napi, sq->vq, 0);

        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
                netif_tx_wake_queue(txq);

        return 0;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
        struct virtnet_info *vi = sq->vq->vdev->priv;
        int num_sg;
        unsigned hdr_len = vi->hdr_len;
        bool can_push;

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        can_push = vi->any_header_sg &&
                !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
                !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
        /* Even if we can, don't push here yet as this would skew
         * csum_start offset below. */
        if (can_push)
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
        else
                hdr = skb_vnet_hdr(skb);

        if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
                                    virtio_is_little_endian(vi->vdev), false))
                BUG();

        if (vi->mergeable_rx_bufs)
                hdr->num_buffers = 0;

        sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
        if (can_push) {
                __skb_push(skb, hdr_len);
                num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
                if (unlikely(num_sg < 0))
                        return num_sg;
                /* Pull header back to avoid skew in tx bytes calculations. */
                __skb_pull(skb, hdr_len);
        } else {
                sg_set_buf(sq->sg, hdr, hdr_len);
                num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
                if (unlikely(num_sg < 0))
                        return num_sg;
                num_sg++;
        }
        return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int qnum = skb_get_queue_mapping(skb);
        struct send_queue *sq = &vi->sq[qnum];
        int err;
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
        bool kick = !skb->xmit_more;
        bool use_napi = sq->napi.weight;

        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(sq);

        if (use_napi && kick)
                virtqueue_enable_cb_delayed(sq->vq);

        /* timestamp packet in software */
        skb_tx_timestamp(skb);

        /* Try to transmit */
        err = xmit_skb(sq, skb);

        /* This should not happen! */
        if (unlikely(err)) {
                dev->stats.tx_fifo_errors++;
                if (net_ratelimit())
                        dev_warn(&dev->dev,
                                 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
                dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Don't wait up for transmitted skbs to be freed. */
        if (!use_napi) {
                skb_orphan(skb);
                nf_reset(skb);
        }

        /* If running out of space, stop queue to avoid getting packets that we
         * are then unable to transmit.
         * An alternative would be to force queuing layer to requeue the skb by
         * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
         * returned in a normal path of operation: it means that driver is not
         * maintaining the TX queue stop/start state properly, and causes
         * the stack to do a non-trivial amount of useless work.
         * Since most packets only take 1 or 2 ring slots, stopping the queue
         * early means 16 slots are typically wasted.
         */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
                if (!use_napi &&
                    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit_skbs(sq);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                netif_start_subqueue(dev, qnum);
                                virtqueue_disable_cb(sq->vq);
                        }
                }
        }

        if (kick || netif_xmit_stopped(txq))
                virtqueue_kick(sq->vq);

        return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *out)
{
        struct scatterlist *sgs[4], hdr, stat;
        unsigned out_num = 0, tmp;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

        vi->ctrl->status = ~0;
        vi->ctrl->hdr.class = class;
        vi->ctrl->hdr.cmd = cmd;
        /* Add header */
        sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
        sgs[out_num++] = &hdr;

        if (out)
                sgs[out_num++] = out;

        /* Add return status. */
        sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
        sgs[out_num] = &stat;

        BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
        virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);

        if (unlikely(!virtqueue_kick(vi->cvq)))
                return vi->ctrl->status == VIRTIO_NET_OK;

        /* Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!virtqueue_get_buf(vi->cvq, &tmp) &&
               !virtqueue_is_broken(vi->cvq))
                cpu_relax();

        return vi->ctrl->status == VIRTIO_NET_OK;
}
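
/* A command thus uses at most three sg entries: the common header, an
 * optional class-specific payload, and the one-byte ack that the device
 * writes back, which is busy-waited on under the rtnl lock.
 */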

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;
        struct sockaddr *addr;
        struct scatterlist sg;

        addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
        if (!addr)
                return -ENOMEM;

        ret = eth_prepare_mac_addr_change(dev, addr);
        if (ret)
                goto out;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                sg_init_one(&sg, addr->sa_data, dev->addr_len);
                if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                          VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
                        dev_warn(&vdev->dev,
                                 "Failed to set mac address by vq command.\n");
                        ret = -EINVAL;
                        goto out;
                }
        } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
                   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                unsigned int i;

                /* Naturally, this has an atomicity problem. */
                for (i = 0; i < dev->addr_len; i++)
                        virtio_cwrite8(vdev,
                                       offsetof(struct virtio_net_config, mac) +
                                       i, addr->sa_data[i]);
        }

        eth_commit_mac_addr_change(dev, p);
        ret = 0;

out:
        kfree(addr);
        return ret;
}

static void virtnet_stats(struct net_device *dev,
                          struct rtnl_link_stats64 *tot)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int cpu;
        unsigned int start;

        for_each_possible_cpu(cpu) {
                struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
                u64 tpackets, tbytes, rpackets, rbytes;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes = stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));

                do {
                        start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes = stats->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));

                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
                tot->rx_bytes += rbytes;
                tot->tx_bytes += tbytes;
        }

        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++)
                napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
        rtnl_lock();
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
                                  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
                dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
        rtnl_unlock();
}

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
        struct scatterlist sg;
        struct net_device *dev = vi->dev;

        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;

        vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
        sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
                                  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
                dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
                         queue_pairs);
                return -EINVAL;
        } else {
                vi->curr_queue_pairs = queue_pairs;
                /* virtnet_open() will refill when device is brought up. */
                if (dev->flags & IFF_UP)
                        schedule_delayed_work(&vi->refill, 0);
        }

        return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
        int err;

        rtnl_lock();
        err = _virtnet_set_queues(vi, queue_pairs);
        rtnl_unlock();
        return err;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);

        for (i = 0; i < vi->max_queue_pairs; i++) {
                napi_disable(&vi->rq[i].napi);
                virtnet_napi_tx_disable(&vi->sq[i].napi);
        }

        return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        struct virtio_net_ctrl_mac *mac_data;
        struct netdev_hw_addr *ha;
        int uc_count;
        int mc_count;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
        vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC, sg))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         vi->ctrl->promisc ? "en" : "dis");

        sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         vi->ctrl->allmulti ? "en" : "dis");

        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
        /* MAC filter - use one buffer for both lists */
        buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
                      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        mac_data = buf;
        if (!buf)
                return;

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
        i = 0;
        netdev_for_each_uc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[uc_count][0];

        mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

80d5c368
PM
1650static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1651 __be16 proto, u16 vid)
0bde9569
AW
1652{
1653 struct virtnet_info *vi = netdev_priv(dev);
1654 struct scatterlist sg;
1655
c285a7d5 1656 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
603343b9 1657 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
0bde9569
AW
1658
1659 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
d24bae32 1660 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
0bde9569 1661 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
8e586137 1662 return 0;
0bde9569
AW
1663}
1664
80d5c368
PM
1665static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1666 __be16 proto, u16 vid)
0bde9569
AW
1667{
1668 struct virtnet_info *vi = netdev_priv(dev);
1669 struct scatterlist sg;
1670
c285a7d5 1671 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
603343b9 1672 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
0bde9569
AW
1673
1674 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
d24bae32 1675 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
0bde9569 1676 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
8e586137 1677 return 0;
0bde9569
AW
1678}
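/* Editor's sketch: both VLAN handlers are invoked by the 8021q core,
 * e.g. after (interface names assumed):
 *
 *   ip link add link eth0 name eth0.100 type vlan id 100
 *
 * which lands in virtnet_vlan_rx_add_vid() so the host filters VLAN
 * 100 for us; NETIF_F_HW_VLAN_CTAG_FILTER is only advertised when
 * VIRTIO_NET_F_CTRL_VLAN was negotiated (see virtnet_find_vqs()).
 */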
1679
8898c21c 1680static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
986a4f4d
JW
1681{
1682 int i;
1683
8898c21c
WG
1684 if (vi->affinity_hint_set) {
1685 for (i = 0; i < vi->max_queue_pairs; i++) {
47be2479
WG
1686 virtqueue_set_affinity(vi->rq[i].vq, -1);
1687 virtqueue_set_affinity(vi->sq[i].vq, -1);
1688 }
1689
8898c21c
WG
1690 vi->affinity_hint_set = false;
1691 }
8898c21c 1692}
47be2479 1693
8898c21c
WG
1694static void virtnet_set_affinity(struct virtnet_info *vi)
1695{
1696 int i;
1697 int cpu;
986a4f4d
JW
1698
1699 /* In multiqueue mode, when the number of online CPUs equals the number
1700 * of queue pairs, make each queue pair private to one CPU by setting
1701 * the affinity hint, eliminating the contention.
1702 */
8898c21c
WG
1703 if (vi->curr_queue_pairs == 1 ||
1704 vi->max_queue_pairs != num_online_cpus()) {
1705 virtnet_clean_affinity(vi, -1);
1706 return;
986a4f4d
JW
1707 }
1708
8898c21c
WG
1709 i = 0;
1710 for_each_online_cpu(cpu) {
986a4f4d
JW
1711 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1712 virtqueue_set_affinity(vi->sq[i].vq, cpu);
9bb8ca86 1713 netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
8898c21c 1714 i++;
986a4f4d
JW
1715 }
1716
8898c21c 1717 vi->affinity_hint_set = true;
986a4f4d
JW
1718}
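/* Editor's illustration: with 4 online CPUs and max_queue_pairs == 4
 * the loop above yields a 1:1 pinning, and XPS steers transmits along
 * the same lines:
 *
 *   cpu0 -> rq0/sq0, cpu1 -> rq1/sq1, cpu2 -> rq2/sq2, cpu3 -> rq3/sq3
 *
 * Any other CPU/queue ratio clears the hints instead via
 * virtnet_clean_affinity().
 */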
1719
8017c279 1720static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
8de4b2f3 1721{
8017c279
SAS
1722 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1723 node);
1724 virtnet_set_affinity(vi);
1725 return 0;
1726}
8de4b2f3 1727
8017c279
SAS
1728static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
1729{
1730 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1731 node_dead);
1732 virtnet_set_affinity(vi);
1733 return 0;
1734}
3ab098df 1735
8017c279
SAS
1736static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
1737{
1738 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1739 node);
1740
1741 virtnet_clean_affinity(vi, cpu);
1742 return 0;
1743}
1744
1745static enum cpuhp_state virtionet_online;
1746
1747static int virtnet_cpu_notif_add(struct virtnet_info *vi)
1748{
1749 int ret;
1750
1751 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
1752 if (ret)
1753 return ret;
1754 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1755 &vi->node_dead);
1756 if (!ret)
1757 return ret;
1758 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1759 return ret;
1760}
1761
1762static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
1763{
1764 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1765 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1766 &vi->node_dead);
986a4f4d
JW
1767}
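/* Editor's note: virtnet_cpu_notif_add() enrolls the instance in both
 * the dynamic online state and CPUHP_VIRT_NET_DEAD, unwinding the
 * first registration if the second fails; that invariant is what lets
 * virtnet_cpu_notif_remove() drop both unconditionally.
 */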
1768
8f9f4668
RJ
1769static void virtnet_get_ringparam(struct net_device *dev,
1770 struct ethtool_ringparam *ring)
1771{
1772 struct virtnet_info *vi = netdev_priv(dev);
1773
986a4f4d
JW
1774 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1775 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
8f9f4668
RJ
1776 ring->rx_pending = ring->rx_max_pending;
1777 ring->tx_pending = ring->tx_max_pending;
8f9f4668
RJ
1778}
1779
66846048
RJ
1780
1781static void virtnet_get_drvinfo(struct net_device *dev,
1782 struct ethtool_drvinfo *info)
1783{
1784 struct virtnet_info *vi = netdev_priv(dev);
1785 struct virtio_device *vdev = vi->vdev;
1786
1787 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1788 strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1789 strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1790
1791}
1792
d73bcd2c
JW
1793/* TODO: Eliminate out-of-order (OOO) packets during queue switching */
1794static int virtnet_set_channels(struct net_device *dev,
1795 struct ethtool_channels *channels)
1796{
1797 struct virtnet_info *vi = netdev_priv(dev);
1798 u16 queue_pairs = channels->combined_count;
1799 int err;
1800
1801 /* We don't support separate rx/tx channels.
1802 * We don't allow setting 'other' channels.
1803 */
1804 if (channels->rx_count || channels->tx_count || channels->other_count)
1805 return -EINVAL;
1806
c18e9cd6 1807 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
d73bcd2c
JW
1808 return -EINVAL;
1809
f600b690
JF
1810 /* For now we don't support modifying channels while XDP is loaded.
1811 * Also, when XDP is loaded all RX queues have XDP programs, so we
1812 * only need to check a single RX queue.
1813 */
1814 if (vi->rq[0].xdp_prog)
1815 return -EINVAL;
1816
47be2479 1817 get_online_cpus();
47315329 1818 err = _virtnet_set_queues(vi, queue_pairs);
d73bcd2c
JW
1819 if (!err) {
1820 netif_set_real_num_tx_queues(dev, queue_pairs);
1821 netif_set_real_num_rx_queues(dev, queue_pairs);
1822
8898c21c 1823 virtnet_set_affinity(vi);
d73bcd2c 1824 }
47be2479 1825 put_online_cpus();
d73bcd2c
JW
1826
1827 return err;
1828}
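/* Editor's sketch: this handler is reached through ethtool's channel
 * API, e.g. (interface name assumed):
 *
 *   ethtool -L eth0 combined 4    # request 4 queue pairs
 *   ethtool -l eth0               # read back current/max counts
 *
 * Only the "combined" count may change; rx/tx/other must stay zero.
 */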
1829
1830static void virtnet_get_channels(struct net_device *dev,
1831 struct ethtool_channels *channels)
1832{
1833 struct virtnet_info *vi = netdev_priv(dev);
1834
1835 channels->combined_count = vi->curr_queue_pairs;
1836 channels->max_combined = vi->max_queue_pairs;
1837 channels->max_other = 0;
1838 channels->rx_count = 0;
1839 channels->tx_count = 0;
1840 channels->other_count = 0;
1841}
1842
16032be5 1843/* Check if the user is trying to change anything besides speed/duplex */
ebb6b4b1
PR
1844static bool
1845virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
16032be5 1846{
ebb6b4b1
PR
1847 struct ethtool_link_ksettings diff1 = *cmd;
1848 struct ethtool_link_ksettings diff2 = {};
16032be5 1849
0cf3ace9
NA
1850 /* cmd is always set, so we need to clear it; validate the port type,
1851 * and since there is no autonegotiation we can ignore advertising.
1852 */
ebb6b4b1
PR
1853 diff1.base.speed = 0;
1854 diff2.base.port = PORT_OTHER;
1855 ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
1856 diff1.base.duplex = 0;
1857 diff1.base.cmd = 0;
1858 diff1.base.link_mode_masks_nwords = 0;
1859
1860 return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
1861 bitmap_empty(diff1.link_modes.supported,
1862 __ETHTOOL_LINK_MODE_MASK_NBITS) &&
1863 bitmap_empty(diff1.link_modes.advertising,
1864 __ETHTOOL_LINK_MODE_MASK_NBITS) &&
1865 bitmap_empty(diff1.link_modes.lp_advertising,
1866 __ETHTOOL_LINK_MODE_MASK_NBITS);
16032be5
NA
1867}
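/* Editor's note: the check above works by copying the request, zeroing
 * every field the driver is willing to accept (speed, duplex, port,
 * cmd, nwords and the advertising mask), and then insisting that what
 * remains is all zeroes, i.e. the user changed nothing else.
 */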
1868
ebb6b4b1
PR
1869static int virtnet_set_link_ksettings(struct net_device *dev,
1870 const struct ethtool_link_ksettings *cmd)
16032be5
NA
1871{
1872 struct virtnet_info *vi = netdev_priv(dev);
1873 u32 speed;
1874
ebb6b4b1 1875 speed = cmd->base.speed;
16032be5
NA
1876 /* don't allow custom speed and duplex */
1877 if (!ethtool_validate_speed(speed) ||
ebb6b4b1 1878 !ethtool_validate_duplex(cmd->base.duplex) ||
16032be5
NA
1879 !virtnet_validate_ethtool_cmd(cmd))
1880 return -EINVAL;
1881 vi->speed = speed;
ebb6b4b1 1882 vi->duplex = cmd->base.duplex;
16032be5
NA
1883
1884 return 0;
1885}
1886
ebb6b4b1
PR
1887static int virtnet_get_link_ksettings(struct net_device *dev,
1888 struct ethtool_link_ksettings *cmd)
16032be5
NA
1889{
1890 struct virtnet_info *vi = netdev_priv(dev);
1891
ebb6b4b1
PR
1892 cmd->base.speed = vi->speed;
1893 cmd->base.duplex = vi->duplex;
1894 cmd->base.port = PORT_OTHER;
16032be5
NA
1895
1896 return 0;
1897}
1898
1899static void virtnet_init_settings(struct net_device *dev)
1900{
1901 struct virtnet_info *vi = netdev_priv(dev);
1902
1903 vi->speed = SPEED_UNKNOWN;
1904 vi->duplex = DUPLEX_UNKNOWN;
1905}
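/* Editor's sketch: there is no physical link, so speed and duplex
 * start out UNKNOWN and are purely administrative; management tooling
 * may pin them, e.g. (interface name assumed):
 *
 *   ethtool -s eth0 speed 10000 duplex full
 *
 * virtnet_set_link_ksettings() stores the values without touching the
 * host, and anything beyond speed/duplex is rejected by the diff check
 * above.
 */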
1906
0fc0b732 1907static const struct ethtool_ops virtnet_ethtool_ops = {
66846048 1908 .get_drvinfo = virtnet_get_drvinfo,
9f4d26d0 1909 .get_link = ethtool_op_get_link,
8f9f4668 1910 .get_ringparam = virtnet_get_ringparam,
d73bcd2c
JW
1911 .set_channels = virtnet_set_channels,
1912 .get_channels = virtnet_get_channels,
074c3582 1913 .get_ts_info = ethtool_op_get_ts_info,
ebb6b4b1
PR
1914 .get_link_ksettings = virtnet_get_link_ksettings,
1915 .set_link_ksettings = virtnet_set_link_ksettings,
a9ea3fc6
HX
1916};
1917
9fe7bfce
JF
1918static void virtnet_freeze_down(struct virtio_device *vdev)
1919{
1920 struct virtnet_info *vi = vdev->priv;
1921 int i;
1922
1923 /* Make sure no work handler is accessing the device */
1924 flush_work(&vi->config_work);
1925
1926 netif_device_detach(vi->dev);
713a98d9 1927 netif_tx_disable(vi->dev);
9fe7bfce
JF
1928 cancel_delayed_work_sync(&vi->refill);
1929
1930 if (netif_running(vi->dev)) {
b92f1e67 1931 for (i = 0; i < vi->max_queue_pairs; i++) {
9fe7bfce 1932 napi_disable(&vi->rq[i].napi);
78a57b48 1933 virtnet_napi_tx_disable(&vi->sq[i].napi);
b92f1e67 1934 }
9fe7bfce
JF
1935 }
1936}
1937
1938static int init_vqs(struct virtnet_info *vi);
1939
1940static int virtnet_restore_up(struct virtio_device *vdev)
1941{
1942 struct virtnet_info *vi = vdev->priv;
1943 int err, i;
1944
1945 err = init_vqs(vi);
1946 if (err)
1947 return err;
1948
1949 virtio_device_ready(vdev);
1950
1951 if (netif_running(vi->dev)) {
1952 for (i = 0; i < vi->curr_queue_pairs; i++)
1953 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1954 schedule_delayed_work(&vi->refill, 0);
1955
b92f1e67 1956 for (i = 0; i < vi->max_queue_pairs; i++) {
e4e8452a 1957 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
b92f1e67
WB
1958 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
1959 &vi->sq[i].napi);
1960 }
9fe7bfce
JF
1961 }
1962
1963 netif_device_attach(vi->dev);
1964 return err;
1965}
1966
3f93522f
JW
1967static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
1968{
1969 struct scatterlist sg;
603343b9 1970 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3f93522f 1971
603343b9 1972 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3f93522f
JW
1973
1974 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
1975 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
1976 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
1977 return -EINVAL;
1978 }
1979
1980 return 0;
1981}
1982
1983static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
1984{
1985 u64 offloads = 0;
1986
1987 if (!vi->guest_offloads)
1988 return 0;
1989
1990 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
1991 offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
1992
1993 return virtnet_set_guest_offloads(vi, offloads);
1994}
1995
1996static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
1997{
1998 u64 offloads = vi->guest_offloads;
1999
2000 if (!vi->guest_offloads)
2001 return 0;
2002 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2003 offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2004
2005 return virtnet_set_guest_offloads(vi, offloads);
2006}
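/* Editor's sketch (illustration only, helper name hypothetical): the
 * clear/restore pair above follows one pattern and could be folded
 * into a single toggle, assuming the same ctrl-vq plumbing:
 */
#if 0
static int virtnet_toggle_guest_offloads(struct virtnet_info *vi, bool xdp)
{
	/* XDP drops every offload except checksum; !xdp restores the
	 * bits saved in vi->guest_offloads at probe time. */
	u64 offloads = xdp ? 0 : vi->guest_offloads;

	if (!vi->guest_offloads)
		return 0;
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}
#endif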
2007
9861ce03
JK
2008static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2009 struct netlink_ext_ack *extack)
f600b690
JF
2010{
2011 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2012 struct virtnet_info *vi = netdev_priv(dev);
2013 struct bpf_prog *old_prog;
017b29c3 2014 u16 xdp_qp = 0, curr_qp;
672aafd5 2015 int i, err;
f600b690 2016
3f93522f
JW
2017 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2018 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2019 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2020 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2021 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
4d463c4d 2022 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
f600b690
JF
2023 return -EOPNOTSUPP;
2024 }
2025
2026 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
4d463c4d 2027 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
f600b690
JF
2028 return -EINVAL;
2029 }
2030
2031 if (dev->mtu > max_sz) {
4d463c4d 2032 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
f600b690
JF
2033 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2034 return -EINVAL;
2035 }
2036
672aafd5
JF
2037 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2038 if (prog)
2039 xdp_qp = nr_cpu_ids;
2040
2041 /* XDP requires extra queues for XDP_TX */
2042 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
4d463c4d 2043 NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
672aafd5
JF
2044 netdev_warn(dev, "request %i queues but max is %i\n",
2045 curr_qp + xdp_qp, vi->max_queue_pairs);
2046 return -ENOMEM;
2047 }
2048
2de2f7f4
JF
2049 if (prog) {
2050 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
2051 if (IS_ERR(prog))
2052 return PTR_ERR(prog);
2053 }
2054
4941d472 2055 /* Make sure NAPI is not using any XDP TX queues for RX. */
e527a479
JW
2056 if (netif_running(dev))
2057 for (i = 0; i < vi->max_queue_pairs; i++)
2058 napi_disable(&vi->rq[i].napi);
f600b690 2059
672aafd5 2060 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4941d472
JW
2061 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2062 if (err)
2063 goto err;
2064 vi->xdp_queue_pairs = xdp_qp;
672aafd5 2065
f600b690
JF
2066 for (i = 0; i < vi->max_queue_pairs; i++) {
2067 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2068 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3f93522f
JW
2069 if (i == 0) {
2070 if (!old_prog)
2071 virtnet_clear_guest_offloads(vi);
2072 if (!prog)
2073 virtnet_restore_guest_offloads(vi);
2074 }
f600b690
JF
2075 if (old_prog)
2076 bpf_prog_put(old_prog);
e527a479
JW
2077 if (netif_running(dev))
2078 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
f600b690
JF
2079 }
2080
2081 return 0;
2de2f7f4 2082
4941d472
JW
2083err:
2084 for (i = 0; i < vi->max_queue_pairs; i++)
2085 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2de2f7f4
JF
2086 if (prog)
2087 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2088 return err;
f600b690
JF
2089}
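/* Editor's sketch: the setup path above runs when userspace attaches
 * or detaches a program, e.g. with iproute2 (names assumed):
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *   ip link set dev eth0 xdp off
 *
 * Attaching clears the guest offloads (XDP must never see a GSO'd
 * super-packet) and reserves nr_cpu_ids extra TX queues for XDP_TX.
 */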
2090
5b0e6629 2091static u32 virtnet_xdp_query(struct net_device *dev)
f600b690
JF
2092{
2093 struct virtnet_info *vi = netdev_priv(dev);
5b0e6629 2094 const struct bpf_prog *xdp_prog;
f600b690
JF
2095 int i;
2096
2097 for (i = 0; i < vi->max_queue_pairs; i++) {
5b0e6629
MKL
2098 xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2099 if (xdp_prog)
2100 return xdp_prog->aux->id;
f600b690 2101 }
5b0e6629 2102 return 0;
f600b690
JF
2103}
2104
f4e63525 2105static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
f600b690
JF
2106{
2107 switch (xdp->command) {
2108 case XDP_SETUP_PROG:
9861ce03 2109 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
f600b690 2110 case XDP_QUERY_PROG:
5b0e6629
MKL
2111 xdp->prog_id = virtnet_xdp_query(dev);
2112 xdp->prog_attached = !!xdp->prog_id;
f600b690
JF
2113 return 0;
2114 default:
2115 return -EINVAL;
2116 }
2117}
2118
76288b4e
SH
2119static const struct net_device_ops virtnet_netdev = {
2120 .ndo_open = virtnet_open,
2121 .ndo_stop = virtnet_close,
2122 .ndo_start_xmit = start_xmit,
2123 .ndo_validate_addr = eth_validate_addr,
9c46f6d4 2124 .ndo_set_mac_address = virtnet_set_mac_address,
2af7698e 2125 .ndo_set_rx_mode = virtnet_set_rx_mode,
3fa2a1df 2126 .ndo_get_stats64 = virtnet_stats,
1824a989
AW
2127 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2128 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
76288b4e
SH
2129#ifdef CONFIG_NET_POLL_CONTROLLER
2130 .ndo_poll_controller = virtnet_netpoll,
91815639 2131#endif
f4e63525 2132 .ndo_bpf = virtnet_xdp,
186b3c99
JW
2133 .ndo_xdp_xmit = virtnet_xdp_xmit,
2134 .ndo_xdp_flush = virtnet_xdp_flush,
2836b4f2 2135 .ndo_features_check = passthru_features_check,
76288b4e
SH
2136};
2137
586d17c5 2138static void virtnet_config_changed_work(struct work_struct *work)
9f4d26d0 2139{
586d17c5
JW
2140 struct virtnet_info *vi =
2141 container_of(work, struct virtnet_info, config_work);
9f4d26d0
MM
2142 u16 v;
2143
855e0c52
RR
2144 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2145 struct virtio_net_config, status, &v) < 0)
507613bf 2146 return;
586d17c5
JW
2147
2148 if (v & VIRTIO_NET_S_ANNOUNCE) {
ee89bab1 2149 netdev_notify_peers(vi->dev);
586d17c5
JW
2150 virtnet_ack_link_announce(vi);
2151 }
9f4d26d0
MM
2152
2153 /* Ignore unknown (future) status bits */
2154 v &= VIRTIO_NET_S_LINK_UP;
2155
2156 if (vi->status == v)
507613bf 2157 return;
9f4d26d0
MM
2158
2159 vi->status = v;
2160
2161 if (vi->status & VIRTIO_NET_S_LINK_UP) {
2162 netif_carrier_on(vi->dev);
986a4f4d 2163 netif_tx_wake_all_queues(vi->dev);
9f4d26d0
MM
2164 } else {
2165 netif_carrier_off(vi->dev);
986a4f4d 2166 netif_tx_stop_all_queues(vi->dev);
9f4d26d0
MM
2167 }
2168}
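/* Editor's illustration: v is the 16-bit device status word; bit
 * VIRTIO_NET_S_LINK_UP (1) drives the carrier state and bit
 * VIRTIO_NET_S_ANNOUNCE (2) asks the guest to advertise itself (e.g.
 * gratuitous ARP after migration) via netdev_notify_peers().
 * A feature-unchecked equivalent of the read above would be (sketch):
 *
 *   v = virtio_cread16(vi->vdev,
 *                      offsetof(struct virtio_net_config, status));
 */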
2169
2170static void virtnet_config_changed(struct virtio_device *vdev)
2171{
2172 struct virtnet_info *vi = vdev->priv;
2173
3b07e9ca 2174 schedule_work(&vi->config_work);
9f4d26d0
MM
2175}
2176
986a4f4d
JW
2177static void virtnet_free_queues(struct virtnet_info *vi)
2178{
d4fb84ee
AV
2179 int i;
2180
ab3971b1
JW
2181 for (i = 0; i < vi->max_queue_pairs; i++) {
2182 napi_hash_del(&vi->rq[i].napi);
d4fb84ee 2183 netif_napi_del(&vi->rq[i].napi);
b92f1e67 2184 netif_napi_del(&vi->sq[i].napi);
ab3971b1 2185 }
d4fb84ee 2186
963abe5c
ED
2187 /* We called napi_hash_del() before netif_napi_del(),
2188 * we need to respect an RCU grace period before freeing vi->rq
2189 */
2190 synchronize_net();
2191
986a4f4d
JW
2192 kfree(vi->rq);
2193 kfree(vi->sq);
603343b9 2194 kfree(vi->ctrl);
986a4f4d
JW
2195}
2196
47315329 2197static void _free_receive_bufs(struct virtnet_info *vi)
986a4f4d 2198{
f600b690 2199 struct bpf_prog *old_prog;
986a4f4d
JW
2200 int i;
2201
2202 for (i = 0; i < vi->max_queue_pairs; i++) {
2203 while (vi->rq[i].pages)
2204 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
f600b690
JF
2205
2206 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2207 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2208 if (old_prog)
2209 bpf_prog_put(old_prog);
986a4f4d 2210 }
47315329
JF
2211}
2212
2213static void free_receive_bufs(struct virtnet_info *vi)
2214{
2215 rtnl_lock();
2216 _free_receive_bufs(vi);
f600b690 2217 rtnl_unlock();
986a4f4d
JW
2218}
2219
fb51879d
MD
2220static void free_receive_page_frags(struct virtnet_info *vi)
2221{
2222 int i;
2223 for (i = 0; i < vi->max_queue_pairs; i++)
2224 if (vi->rq[i].alloc_frag.page)
2225 put_page(vi->rq[i].alloc_frag.page);
2226}
2227
b68df015 2228static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
56434a01
JF
2229{
2230 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
2231 return false;
2232 else if (q < vi->curr_queue_pairs)
2233 return true;
2234 else
2235 return false;
2236}
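/* Editor's illustration: with curr_queue_pairs == 6 and
 * xdp_queue_pairs == 2, send queues 0-3 carry ordinary skbs while
 * queues 4-5 carry raw XDP_TX buffers, which is why
 * free_unused_bufs() below must release the latter with put_page()
 * instead of dev_kfree_skb().
 */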
2237
986a4f4d
JW
2238static void free_unused_bufs(struct virtnet_info *vi)
2239{
2240 void *buf;
2241 int i;
2242
2243 for (i = 0; i < vi->max_queue_pairs; i++) {
2244 struct virtqueue *vq = vi->sq[i].vq;
56434a01 2245 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
b68df015 2246 if (!is_xdp_raw_buffer_queue(vi, i))
56434a01
JF
2247 dev_kfree_skb(buf);
2248 else
2249 put_page(virt_to_head_page(buf));
2250 }
986a4f4d
JW
2251 }
2252
2253 for (i = 0; i < vi->max_queue_pairs; i++) {
2254 struct virtqueue *vq = vi->rq[i].vq;
2255
2256 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
ab7db917 2257 if (vi->mergeable_rx_bufs) {
680557cf 2258 put_page(virt_to_head_page(buf));
ab7db917 2259 } else if (vi->big_packets) {
fa9fac17 2260 give_pages(&vi->rq[i], buf);
ab7db917 2261 } else {
f6b10209 2262 put_page(virt_to_head_page(buf));
ab7db917 2263 }
986a4f4d 2264 }
986a4f4d
JW
2265 }
2266}
2267
e9d7417b
JW
2268static void virtnet_del_vqs(struct virtnet_info *vi)
2269{
2270 struct virtio_device *vdev = vi->vdev;
2271
8898c21c 2272 virtnet_clean_affinity(vi, -1);
986a4f4d 2273
e9d7417b 2274 vdev->config->del_vqs(vdev);
986a4f4d
JW
2275
2276 virtnet_free_queues(vi);
e9d7417b
JW
2277}
2278
d85b758f
MT
2279/* How large should a single buffer be so a queue full of these can fit at
2280 * least one full packet?
2281 * Logic below assumes the mergeable buffer header is used.
2282 */
2283static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2284{
2285 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2286 unsigned int rq_size = virtqueue_get_vring_size(vq);
2287 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2288 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2289 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2290
f0c3192c
MT
2291 return max(max(min_buf_len, hdr_len) - hdr_len,
2292 (unsigned int)GOOD_PACKET_LEN);
d85b758f
MT
2293}
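/* Editor's worked example (values assumed): with the 12-byte mergeable
 * header, max_mtu = 1500 and a 256-entry ring:
 *
 *   buf_len     = 12 + 14 + 4 + 1500 = 1530
 *   min_buf_len = DIV_ROUND_UP(1530, 256) = 6
 *   result      = max(max(6, 12) - 12, 1518) = GOOD_PACKET_LEN
 *
 * Large rings therefore hit the GOOD_PACKET_LEN floor, while a small
 * 16-entry ring with big_packets dominates instead:
 * DIV_ROUND_UP(65565, 16) - 12 = 4086.
 */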
2294
986a4f4d 2295static int virtnet_find_vqs(struct virtnet_info *vi)
3f9c10b0 2296{
986a4f4d
JW
2297 vq_callback_t **callbacks;
2298 struct virtqueue **vqs;
2299 int ret = -ENOMEM;
2300 int i, total_vqs;
2301 const char **names;
d45b897b 2302 bool *ctx;
986a4f4d
JW
2303
2304 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
2305 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
2306 * possible control vq.
2307 */
2308 total_vqs = vi->max_queue_pairs * 2 +
2309 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2310
2311 /* Allocate space for find_vqs parameters */
2312 vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
2313 if (!vqs)
2314 goto err_vq;
2315 callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
2316 if (!callbacks)
2317 goto err_callback;
2318 names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
2319 if (!names)
2320 goto err_names;
192f68cf 2321 if (!vi->big_packets || vi->mergeable_rx_bufs) {
d45b897b
MT
2322 ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
2323 if (!ctx)
2324 goto err_ctx;
2325 } else {
2326 ctx = NULL;
2327 }
986a4f4d
JW
2328
2329 /* Parameters for control virtqueue, if any */
2330 if (vi->has_cvq) {
2331 callbacks[total_vqs - 1] = NULL;
2332 names[total_vqs - 1] = "control";
2333 }
3f9c10b0 2334
986a4f4d
JW
2335 /* Allocate/initialize parameters for send/receive virtqueues */
2336 for (i = 0; i < vi->max_queue_pairs; i++) {
2337 callbacks[rxq2vq(i)] = skb_recv_done;
2338 callbacks[txq2vq(i)] = skb_xmit_done;
2339 sprintf(vi->rq[i].name, "input.%d", i);
2340 sprintf(vi->sq[i].name, "output.%d", i);
2341 names[rxq2vq(i)] = vi->rq[i].name;
2342 names[txq2vq(i)] = vi->sq[i].name;
d45b897b
MT
2343 if (ctx)
2344 ctx[rxq2vq(i)] = true;
986a4f4d 2345 }
3f9c10b0 2346
986a4f4d 2347 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
d45b897b 2348 names, ctx, NULL);
986a4f4d
JW
2349 if (ret)
2350 goto err_find;
3f9c10b0 2351
986a4f4d
JW
2352 if (vi->has_cvq) {
2353 vi->cvq = vqs[total_vqs - 1];
3f9c10b0 2354 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
f646968f 2355 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3f9c10b0 2356 }
986a4f4d
JW
2357
2358 for (i = 0; i < vi->max_queue_pairs; i++) {
2359 vi->rq[i].vq = vqs[rxq2vq(i)];
d85b758f 2360 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
986a4f4d
JW
2361 vi->sq[i].vq = vqs[txq2vq(i)];
2362 }
2363
2364 kfree(names);
2365 kfree(callbacks);
2366 kfree(vqs);
55281621 2367 kfree(ctx);
986a4f4d 2368
3f9c10b0 2369 return 0;
986a4f4d
JW
2370
2371err_find:
d45b897b
MT
2372 kfree(ctx);
2373err_ctx:
986a4f4d
JW
2374 kfree(names);
2375err_names:
2376 kfree(callbacks);
2377err_callback:
2378 kfree(vqs);
2379err_vq:
2380 return ret;
2381}
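/* Editor's illustration: for max_queue_pairs == 2 plus a control
 * queue, the vqs[] array found above is laid out as
 *
 *   vqs[0] rx0 | vqs[1] tx0 | vqs[2] rx1 | vqs[3] tx1 | vqs[4] ctrl
 *
 * matching the rxq2vq(i) = 2 * i and txq2vq(i) = 2 * i + 1 helpers
 * used earlier in the file.
 */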
2382
2383static int virtnet_alloc_queues(struct virtnet_info *vi)
2384{
2385 int i;
2386
603343b9
MT
2387 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2388 if (!vi->ctrl)
2389 goto err_ctrl;
986a4f4d
JW
2390 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2391 if (!vi->sq)
2392 goto err_sq;
2393 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
008d4278 2394 if (!vi->rq)
986a4f4d
JW
2395 goto err_rq;
2396
2397 INIT_DELAYED_WORK(&vi->refill, refill_work);
2398 for (i = 0; i < vi->max_queue_pairs; i++) {
2399 vi->rq[i].pages = NULL;
2400 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2401 napi_weight);
1d11e732
WB
2402 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2403 napi_tx ? napi_weight : 0);
986a4f4d
JW
2404
2405 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
5377d758 2406 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
986a4f4d
JW
2407 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2408 }
2409
2410 return 0;
2411
2412err_rq:
2413 kfree(vi->sq);
2414err_sq:
603343b9
MT
2415 kfree(vi->ctrl);
2416err_ctrl:
986a4f4d
JW
2417 return -ENOMEM;
2418}
2419
2420static int init_vqs(struct virtnet_info *vi)
2421{
2422 int ret;
2423
2424 /* Allocate send & receive queues */
2425 ret = virtnet_alloc_queues(vi);
2426 if (ret)
2427 goto err;
2428
2429 ret = virtnet_find_vqs(vi);
2430 if (ret)
2431 goto err_free;
2432
47be2479 2433 get_online_cpus();
8898c21c 2434 virtnet_set_affinity(vi);
47be2479
WG
2435 put_online_cpus();
2436
986a4f4d
JW
2437 return 0;
2438
2439err_free:
2440 virtnet_free_queues(vi);
2441err:
2442 return ret;
3f9c10b0
AS
2443}
2444
fbf28d78
MD
2445#ifdef CONFIG_SYSFS
2446static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
718ad681 2447 char *buf)
fbf28d78
MD
2448{
2449 struct virtnet_info *vi = netdev_priv(queue->dev);
2450 unsigned int queue_index = get_netdev_rx_queue_index(queue);
5377d758 2451 struct ewma_pkt_len *avg;
fbf28d78
MD
2452
2453 BUG_ON(queue_index >= vi->max_queue_pairs);
2454 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
d85b758f
MT
2455 return sprintf(buf, "%u\n",
2456 get_mergeable_buf_len(&vi->rq[queue_index], avg));
fbf28d78
MD
2457}
2458
2459static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2460 __ATTR_RO(mergeable_rx_buffer_size);
2461
2462static struct attribute *virtio_net_mrg_rx_attrs[] = {
2463 &mergeable_rx_buffer_size_attribute.attr,
2464 NULL
2465};
2466
2467static const struct attribute_group virtio_net_mrg_rx_group = {
2468 .name = "virtio_net",
2469 .attrs = virtio_net_mrg_rx_attrs
2470};
2471#endif
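/* Editor's sketch: with mergeable buffers the group above appears per
 * RX queue, so the current EWMA-derived buffer size estimate can be
 * read back from userspace (device name assumed):
 *
 *   cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 */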
2472
892d6eb1
JW
2473static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2474 unsigned int fbit,
2475 const char *fname, const char *dname)
2476{
2477 if (!virtio_has_feature(vdev, fbit))
2478 return false;
2479
2480 dev_err(&vdev->dev, "device advertises feature %s but not %s",
2481 fname, dname);
2482
2483 return true;
2484}
2485
2486#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
2487 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2488
2489static bool virtnet_validate_features(struct virtio_device *vdev)
2490{
2491 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2492 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2493 "VIRTIO_NET_F_CTRL_VQ") ||
2494 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2495 "VIRTIO_NET_F_CTRL_VQ") ||
2496 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2497 "VIRTIO_NET_F_CTRL_VQ") ||
2498 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2499 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2500 "VIRTIO_NET_F_CTRL_VQ"))) {
2501 return false;
2502 }
2503
2504 return true;
2505}
2506
d0c2c997
JW
2507#define MIN_MTU ETH_MIN_MTU
2508#define MAX_MTU ETH_MAX_MTU
2509
fe36cbe0 2510static int virtnet_validate(struct virtio_device *vdev)
296f96fc 2511{
6ba42248
MT
2512 if (!vdev->config->get) {
2513 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2514 __func__);
2515 return -EINVAL;
2516 }
2517
892d6eb1
JW
2518 if (!virtnet_validate_features(vdev))
2519 return -EINVAL;
2520
fe36cbe0
MT
2521 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2522 int mtu = virtio_cread16(vdev,
2523 offsetof(struct virtio_net_config,
2524 mtu));
2525 if (mtu < MIN_MTU)
2526 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2527 }
2528
2529 return 0;
2530}
2531
2532static int virtnet_probe(struct virtio_device *vdev)
2533{
2534 int i, err;
2535 struct net_device *dev;
2536 struct virtnet_info *vi;
2537 u16 max_queue_pairs;
2538 int mtu;
2539
986a4f4d 2540 /* Find if host supports multiqueue virtio_net device */
855e0c52
RR
2541 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2542 struct virtio_net_config,
2543 max_virtqueue_pairs, &max_queue_pairs);
986a4f4d
JW
2544
2545 /* We need at least 2 queues */
2546 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
2547 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
2548 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2549 max_queue_pairs = 1;
296f96fc
RR
2550
2551 /* Allocate ourselves a network device with room for our info */
986a4f4d 2552 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
296f96fc
RR
2553 if (!dev)
2554 return -ENOMEM;
2555
2556 /* Set up network device as normal. */
f2f2c8b4 2557 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
76288b4e 2558 dev->netdev_ops = &virtnet_netdev;
296f96fc 2559 dev->features = NETIF_F_HIGHDMA;
3fa2a1df 2560
7ad24ea4 2561 dev->ethtool_ops = &virtnet_ethtool_ops;
296f96fc
RR
2562 SET_NETDEV_DEV(dev, &vdev->dev);
2563
2564 /* Do we support "hardware" checksums? */
98e778c9 2565 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
296f96fc 2566 /* This opens up the world of extra features. */
48900cb6 2567 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
98e778c9 2568 if (csum)
48900cb6 2569 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
98e778c9
MM
2570
2571 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
e078de03 2572 dev->hw_features |= NETIF_F_TSO
34a48579
RR
2573 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
2574 }
5539ae96 2575 /* Individual feature bits: what can host handle? */
98e778c9
MM
2576 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
2577 dev->hw_features |= NETIF_F_TSO;
2578 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
2579 dev->hw_features |= NETIF_F_TSO6;
2580 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
2581 dev->hw_features |= NETIF_F_TSO_ECN;
98e778c9 2582
41f2f127
JW
2583 dev->features |= NETIF_F_GSO_ROBUST;
2584
98e778c9 2585 if (gso)
e078de03 2586 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
98e778c9 2587 /* (!csum && gso) case will be fixed by register_netdev() */
296f96fc 2588 }
4f49129b
TH
2589 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
2590 dev->features |= NETIF_F_RXCSUM;
296f96fc 2591
4fda8302
JW
2592 dev->vlan_features = dev->features;
2593
d0c2c997
JW
2594 /* MTU range: 68 - 65535 */
2595 dev->min_mtu = MIN_MTU;
2596 dev->max_mtu = MAX_MTU;
2597
296f96fc 2598 /* Configuration may specify what MAC to use. Otherwise random. */
855e0c52
RR
2599 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
2600 virtio_cread_bytes(vdev,
2601 offsetof(struct virtio_net_config, mac),
2602 dev->dev_addr, dev->addr_len);
2603 else
f2cedb63 2604 eth_hw_addr_random(dev);
296f96fc
RR
2605
2606 /* Set up our device-specific information */
2607 vi = netdev_priv(dev);
296f96fc
RR
2608 vi->dev = dev;
2609 vi->vdev = vdev;
d9d5dcc8 2610 vdev->priv = vi;
3fa2a1df 2611 vi->stats = alloc_percpu(struct virtnet_stats);
2612 err = -ENOMEM;
2613 if (vi->stats == NULL)
2614 goto free;
2615
827da44c
JS
2616 for_each_possible_cpu(i) {
2617 struct virtnet_stats *virtnet_stats;
2618 virtnet_stats = per_cpu_ptr(vi->stats, i);
2619 u64_stats_init(&virtnet_stats->tx_syncp);
2620 u64_stats_init(&virtnet_stats->rx_syncp);
2621 }
2622
586d17c5 2623 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
296f96fc 2624
97402b96 2625 /* If we can receive ANY GSO packets, we must allocate large ones. */
8e95a202
JP
2626 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2627 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
e3e3c423
VY
2628 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2629 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
97402b96
HX
2630 vi->big_packets = true;
2631
3f2c31d9
MM
2632 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2633 vi->mergeable_rx_bufs = true;
2634
d04302b3
MT
2635 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
2636 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
012873d0
MT
2637 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2638 else
2639 vi->hdr_len = sizeof(struct virtio_net_hdr);
2640
75993300
MT
2641 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2642 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
e7428e95
MT
2643 vi->any_header_sg = true;
2644
986a4f4d
JW
2645 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2646 vi->has_cvq = true;
2647
14de9d11
AC
2648 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2649 mtu = virtio_cread16(vdev,
2650 offsetof(struct virtio_net_config,
2651 mtu));
93a205ee 2652 if (mtu < dev->min_mtu) {
fe36cbe0
MT
2653 /* Should never trigger: MTU was previously validated
2654 * in virtnet_validate.
2655 */
2656 dev_err(&vdev->dev, "device MTU appears to have changed, "
2657 "it is now %d < %d", mtu, dev->min_mtu);
2658 goto free_stats;
93a205ee 2659 }
2e123b44 2660
fe36cbe0
MT
2661 dev->mtu = mtu;
2662 dev->max_mtu = mtu;
2663
2e123b44
MT
2664 /* TODO: size buffers correctly in this case. */
2665 if (dev->mtu > ETH_DATA_LEN)
2666 vi->big_packets = true;
14de9d11
AC
2667 }
2668
012873d0
MT
2669 if (vi->any_header_sg)
2670 dev->needed_headroom = vi->hdr_len;
6ebbc1a6 2671
44900010
JW
2672 /* Enable multiqueue by default */
2673 if (num_online_cpus() >= max_queue_pairs)
2674 vi->curr_queue_pairs = max_queue_pairs;
2675 else
2676 vi->curr_queue_pairs = num_online_cpus();
986a4f4d
JW
2677 vi->max_queue_pairs = max_queue_pairs;
2678
2679 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
3f9c10b0 2680 err = init_vqs(vi);
d2a7ddda 2681 if (err)
9bb8ca86 2682 goto free_stats;
296f96fc 2683
fbf28d78
MD
2684#ifdef CONFIG_SYSFS
2685 if (vi->mergeable_rx_bufs)
2686 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
2687#endif
0f13b66b
ZYW
2688 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
2689 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
986a4f4d 2690
16032be5
NA
2691 virtnet_init_settings(dev);
2692
296f96fc
RR
2693 err = register_netdev(dev);
2694 if (err) {
2695 pr_debug("virtio_net: registering device failed\n");
d2a7ddda 2696 goto free_vqs;
296f96fc 2697 }
b3369c1f 2698
4baf1e33
MT
2699 virtio_device_ready(vdev);
2700
8017c279 2701 err = virtnet_cpu_notif_add(vi);
8de4b2f3
WG
2702 if (err) {
2703 pr_debug("virtio_net: registering cpu notifier failed\n");
f00e35e2 2704 goto free_unregister_netdev;
8de4b2f3
WG
2705 }
2706
a220871b 2707 virtnet_set_queues(vi, vi->curr_queue_pairs);
44900010 2708
167c25e4
JW
2709 /* Assume link up if device can't report link status,
2710 * otherwise get link status from config. */
5bf6a429 2711 netif_carrier_off(dev);
167c25e4 2712 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3b07e9ca 2713 schedule_work(&vi->config_work);
167c25e4
JW
2714 } else {
2715 vi->status = VIRTIO_NET_S_LINK_UP;
2716 netif_carrier_on(dev);
2717 }
9f4d26d0 2718
3f93522f
JW
2719 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
2720 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
2721 set_bit(guest_offloads[i], &vi->guest_offloads);
2722
986a4f4d
JW
2723 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
2724 dev->name, max_queue_pairs);
2725
296f96fc
RR
2726 return 0;
2727
f00e35e2 2728free_unregister_netdev:
02465555
MT
2729 vi->vdev->config->reset(vdev);
2730
b3369c1f 2731 unregister_netdev(dev);
d2a7ddda 2732free_vqs:
986a4f4d 2733 cancel_delayed_work_sync(&vi->refill);
fb51879d 2734 free_receive_page_frags(vi);
e9d7417b 2735 virtnet_del_vqs(vi);
3fa2a1df 2736free_stats:
2737 free_percpu(vi->stats);
296f96fc
RR
2738free:
2739 free_netdev(dev);
2740 return err;
2741}
2742
04486ed0 2743static void remove_vq_common(struct virtnet_info *vi)
296f96fc 2744{
04486ed0 2745 vi->vdev->config->reset(vi->vdev);
830a8a97
SM
2746
2747 /* Free unused buffers in both send and recv, if any. */
9ab86bbc 2748 free_unused_bufs(vi);
fb6813f4 2749
986a4f4d 2750 free_receive_bufs(vi);
d2a7ddda 2751
fb51879d
MD
2752 free_receive_page_frags(vi);
2753
986a4f4d 2754 virtnet_del_vqs(vi);
04486ed0
AS
2755}
2756
8cc085d6 2757static void virtnet_remove(struct virtio_device *vdev)
04486ed0
AS
2758{
2759 struct virtnet_info *vi = vdev->priv;
2760
8017c279 2761 virtnet_cpu_notif_remove(vi);
8de4b2f3 2762
102a2786
MT
2763 /* Make sure no work handler is accessing the device. */
2764 flush_work(&vi->config_work);
586d17c5 2765
04486ed0
AS
2766 unregister_netdev(vi->dev);
2767
2768 remove_vq_common(vi);
fb6813f4 2769
2e66f55b 2770 free_percpu(vi->stats);
74b2553f 2771 free_netdev(vi->dev);
296f96fc
RR
2772}
2773
67a75194 2774static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
0741bcb5
AS
2775{
2776 struct virtnet_info *vi = vdev->priv;
2777
8017c279 2778 virtnet_cpu_notif_remove(vi);
9fe7bfce 2779 virtnet_freeze_down(vdev);
0741bcb5
AS
2780 remove_vq_common(vi);
2781
2782 return 0;
2783}
2784
67a75194 2785static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
0741bcb5
AS
2786{
2787 struct virtnet_info *vi = vdev->priv;
9fe7bfce 2788 int err;
0741bcb5 2789
9fe7bfce 2790 err = virtnet_restore_up(vdev);
0741bcb5
AS
2791 if (err)
2792 return err;
986a4f4d
JW
2793 virtnet_set_queues(vi, vi->curr_queue_pairs);
2794
8017c279 2795 err = virtnet_cpu_notif_add(vi);
ec9debbd
JW
2796 if (err)
2797 return err;
2798
0741bcb5
AS
2799 return 0;
2800}
0741bcb5 2801
296f96fc
RR
2802static struct virtio_device_id id_table[] = {
2803 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
2804 { 0 },
2805};
2806
f3358507
MT
2807#define VIRTNET_FEATURES \
2808 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2809 VIRTIO_NET_F_MAC, \
2810 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2811 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2812 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2813 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2814 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2815 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2816 VIRTIO_NET_F_CTRL_MAC_ADDR, \
3f93522f 2817 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
f3358507 2818
c45a6816 2819static unsigned int features[] = {
f3358507
MT
2820 VIRTNET_FEATURES,
2821};
2822
2823static unsigned int features_legacy[] = {
2824 VIRTNET_FEATURES,
2825 VIRTIO_NET_F_GSO,
e7428e95 2826 VIRTIO_F_ANY_LAYOUT,
c45a6816
RR
2827};
2828
22402529 2829static struct virtio_driver virtio_net_driver = {
c45a6816
RR
2830 .feature_table = features,
2831 .feature_table_size = ARRAY_SIZE(features),
f3358507
MT
2832 .feature_table_legacy = features_legacy,
2833 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
296f96fc
RR
2834 .driver.name = KBUILD_MODNAME,
2835 .driver.owner = THIS_MODULE,
2836 .id_table = id_table,
fe36cbe0 2837 .validate = virtnet_validate,
296f96fc 2838 .probe = virtnet_probe,
8cc085d6 2839 .remove = virtnet_remove,
9f4d26d0 2840 .config_changed = virtnet_config_changed,
89107000 2841#ifdef CONFIG_PM_SLEEP
0741bcb5
AS
2842 .freeze = virtnet_freeze,
2843 .restore = virtnet_restore,
2844#endif
296f96fc
RR
2845};
2846
8017c279
SAS
2847static __init int virtio_net_driver_init(void)
2848{
2849 int ret;
2850
73c1b41e 2851 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
8017c279
SAS
2852 virtnet_cpu_online,
2853 virtnet_cpu_down_prep);
2854 if (ret < 0)
2855 goto out;
2856 virtionet_online = ret;
73c1b41e 2857 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
8017c279
SAS
2858 NULL, virtnet_cpu_dead);
2859 if (ret)
2860 goto err_dead;
2861
2862 ret = register_virtio_driver(&virtio_net_driver);
2863 if (ret)
2864 goto err_virtio;
2865 return 0;
2866err_virtio:
2867 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2868err_dead:
2869 cpuhp_remove_multi_state(virtionet_online);
2870out:
2871 return ret;
2872}
2873module_init(virtio_net_driver_init);
2874
2875static __exit void virtio_net_driver_exit(void)
2876{
cfa0ebc9 2877 unregister_virtio_driver(&virtio_net_driver);
8017c279
SAS
2878 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2879 cpuhp_remove_multi_state(virtionet_online);
8017c279
SAS
2880}
2881module_exit(virtio_net_driver_exit);
296f96fc
RR
2882
2883MODULE_DEVICE_TABLE(virtio, id_table);
2884MODULE_DESCRIPTION("Virtio network driver");
2885MODULE_LICENSE("GPL");