/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <net/busy_poll.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 1, 64)
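/*
 * Illustrative note (not from the original source): with a weight of 64,
 * each new sample moves the average by roughly 1/64th of the distance to
 * that sample, i.e. avg ~= avg + (sample - avg)/64.  So even if a refill
 * pushes a burst of unusually small or large packets through, the average
 * buffer size drifts slowly rather than oscillating with the burst.
 */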
/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)

#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};
/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send _queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};
/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};
struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* Control VQ buffers: protected by the rtnl lock */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
	struct virtio_net_ctrl_mq ctrl_mq;
	u8 ctrl_promisc;
	u8 ctrl_allmulti;
	u16 ctrl_vid;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};
struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};
/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
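/*
 * Worked example of the mapping above (illustrative, not from the original
 * source): with 2 queue pairs, virtqueue indexes are rx0=0, tx0=1, rx1=2,
 * tx1=3, and the control vq (if any) is 4.  So rxq2vq(1) == 2,
 * txq2vq(1) == 3, and for the virtqueue with index 3, vq2txq() == 1.
 */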
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}
/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}
static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}
static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}
static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
{
	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);

	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
}

static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
{
	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;

	return (unsigned long)buf | (size - 1);
}
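/*
 * Worked example of the context packing above (illustrative, not from the
 * original source): with MERGEABLE_BUFFER_ALIGN == 256, buffers are 256-byte
 * aligned and their truesize is a multiple of 256, so the low 8 bits of the
 * address are free to carry the size.  A buffer at 0x...1000 with truesize
 * 1536 encodes as 0x...1000 | (1536/256 - 1) == 0x...1005; decoding yields
 * address = ctx & ~255UL and truesize = (5 + 1) * 256 == 1536.
 */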
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof *hdr;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}
static void virtnet_xdp_xmit(struct virtnet_info *vi,
			     struct receive_queue *rq,
			     struct send_queue *sq,
			     struct xdp_buff *xdp)
{
	struct page *page = virt_to_head_page(xdp->data);
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int num_sg, len;
	void *xdp_sent;
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		struct page *sent_page = virt_to_head_page(xdp_sent);

		if (vi->mergeable_rx_bufs)
			put_page(sent_page);
		else
			give_pages(rq, sent_page);
	}

	/* Zero header and leave csum up to XDP layers */
	hdr = xdp->data;
	memset(hdr, 0, vi->hdr_len);

	num_sg = 1;
	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
				   xdp->data, GFP_ATOMIC);
	if (unlikely(err)) {
		if (vi->mergeable_rx_bufs)
			put_page(page);
		else
			give_pages(rq, page);
		return; // On error abort to avoid unnecessary kick
	} else if (!vi->mergeable_rx_bufs) {
		/* If not mergeable bufs must be big packets so cleanup pages */
		give_pages(rq, (struct page *)page->private);
		page->private = 0;
	}

	virtqueue_kick(sq->vq);
}
static u32 do_xdp_prog(struct virtnet_info *vi,
		       struct receive_queue *rq,
		       struct bpf_prog *xdp_prog,
		       struct page *page, int offset, int len)
{
	int hdr_padded_len;
	struct xdp_buff xdp;
	unsigned int qp;
	u32 act;
	u8 *buf;

	buf = page_address(page) + offset;

	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	xdp.data = buf + hdr_padded_len;
	xdp.data_end = xdp.data + (len - vi->hdr_len);

	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	switch (act) {
	case XDP_PASS:
		return XDP_PASS;
	case XDP_TX:
		qp = vi->curr_queue_pairs -
			vi->xdp_queue_pairs +
			smp_processor_id();
		xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
		return XDP_TX;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
	case XDP_DROP:
		return XDP_DROP;
	}
}
static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
{
	struct sk_buff *skb = buf;

	len -= vi->hdr_len;
	skb_trim(skb, len);

	return skb;
}
static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct bpf_prog *xdp_prog;
	struct page *page = buf;
	struct sk_buff *skb;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
		u32 act;

		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
			goto err_xdp;
		act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_TX:
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_DROP:
		default:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
	if (unlikely(!skb))
		goto err;

	return skb;

err_xdp:
	rcu_read_unlock();
err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
xdp_xmit:
	return NULL;
}
/* The conditions to enable XDP should preclude the underlying device from
 * sending packets across multiple buffers (num_buf > 1). However per spec
 * it does not appear to be illegal to do so but rather just against convention.
 * So in order to avoid making a system unresponsive the packets are pushed
 * into a page and the XDP program is run. This will be extremely slow and we
 * push a warning to the user to fix this as soon as possible. Fixing this may
 * require resolving the underlying hardware to determine why multiple buffers
 * are being received or simply loading the XDP program in the ingress stack
 * after the skb is built because there is no advantage to running it here
 * anymore.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 num_buf,
				       struct page *p,
				       int offset,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	unsigned int page_off = 0;

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--num_buf) {
		unsigned int buflen;
		unsigned long ctx;
		void *buf;
		int off;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!ctx))
			goto err_buf;

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packet larger than the MTU.
		 */
		if ((page_off + buflen) > PAGE_SIZE)
			goto err_buf;

		buf = mergeable_ctx_to_buf_address(ctx);
		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	*len = page_off;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 unsigned long ctx,
					 unsigned int len)
{
	void *buf = mergeable_ctx_to_buf_address(ctx);
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct page *xdp_page;
		u32 act;

		/* No known backend devices should send packets with
		 * more than a single buffer when XDP conditions are
		 * met. However it is not strictly illegal so the case
		 * is handled as an exception and a warning is thrown.
		 */
		if (unlikely(num_buf > 1)) {
			bpf_warn_invalid_xdp_buffer();

			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, num_buf,
						      page, offset, &len);
			if (!xdp_page)
				goto err_xdp;
			offset = 0;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
			goto err_xdp;

		act = do_xdp_prog(vi, rq, xdp_prog, page, offset, len);
		switch (act) {
		case XDP_PASS:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			break;
		case XDP_TX:
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_DROP:
		default:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		buf = mergeable_ctx_to_buf_address(ctx);
		page = virt_to_head_page(buf);

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len)
{
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			unsigned long ctx = (unsigned long)buf;
			void *base = mergeable_ctx_to_buf_address(ctx);
			put_page(virt_to_head_page(base));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			dev_kfree_skb(buf);
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(vi, buf, len);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct sk_buff *skb;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_init_table(rq->sg, 2);
	sg_set_buf(rq->sg, hdr, vi->hdr_len);
	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}
static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}
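/*
 * Illustrative sizing example (not from the original source): the mergeable
 * header is 12 bytes, so an EWMA average of ~1500 bytes gives
 * len = 12 + 1500 = 1512, which ALIGN() rounds up to 1536 with a 256-byte
 * MERGEABLE_BUFFER_ALIGN.  A tiny-packet workload clamps at
 * 12 + GOOD_PACKET_LEN instead, and a jumbo workload at PAGE_SIZE.
 */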
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned long ctx;
	int err;
	unsigned int len, hole;

	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	ctx = mergeable_buf_to_ctx(buf, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer. This extra space is not included in
		 * the truesize stored in ctx.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}
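/*
 * Illustrative example of the hole handling above (not from the original
 * source): if the page frag cache is 32768 bytes and buffers are 1536 bytes,
 * 21 buffers consume 32256 bytes and leave a 512-byte hole; since
 * 512 < 1536, the final buffer is grown to 2048 bytes so no space is
 * stranded, while its advertised truesize (from ctx) stays 1536.
 */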
/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	gfp |= __GFP_COLD;
	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}
static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}
static int virtnet_receive(struct receive_queue *rq, int budget)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0;
	void *buf;

	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(vi, rq, buf, len);
		received++;
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	return received;
}
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	unsigned int r, received;

	received = virtnet_receive(rq, budget);

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete_done(napi, received);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
		}
	}

	return received;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int virtnet_busy_poll(struct napi_struct *napi)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int r, received = 0, budget = 4;

	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
		return LL_FLUSH_FAILED;

	if (!napi_schedule_prep(napi))
		return LL_FLUSH_BUSY;

	virtqueue_disable_cb(rq->vq);

again:
	received += virtnet_receive(rq, budget);

	r = virtqueue_enable_cb_prepare(rq->vq);
	clear_bit(NAPI_STATE_SCHED, &napi->state);
	if (unlikely(virtqueue_poll(rq->vq, r)) &&
	    napi_schedule_prep(napi)) {
		virtqueue_disable_cb(rq->vq);
		if (received < budget) {
			budget -= received;
			goto again;
		} else {
			__napi_schedule(napi);
		}
	}

	return received;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		   !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		   !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev)))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
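/*
 * Layout note (illustrative, not from the original source): with
 * any_header_sg and suitable headroom, the virtio header is pushed directly
 * in front of the packet, so header + linear data travel in a single sg
 * entry and the skb needs nr_frags + 1 entries in total.  Otherwise the
 * header is built in skb->cb and must occupy its own sg entry, hence
 * nr_frags + 2.
 */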
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !skb->xmit_more;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq))
		virtqueue_kick(sq->vq);

	return NETDEV_TX_OK;
}
/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl_status = ~0;
	vi->ctrl_hdr.class = class;
	vi->ctrl_hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl_status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl_status == VIRTIO_NET_OK;
}
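/*
 * Illustrative command layout (not from the original source): a command such
 * as VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET travels as up to three sg entries --
 * [0] the class/cmd header, [1] the optional command payload (here the
 * virtio_net_ctrl_mq struct), [2] a one-byte status the device writes back.
 * The driver then busy-waits on that status rather than sleeping, since the
 * kick traps to the hypervisor and completion is expected to be immediate.
 */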
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;
	memcpy(addr, p, sizeof(*addr));

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}
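/*
 * Note on the retry loops above (illustrative, not from the original
 * source): on 32-bit kernels a 64-bit counter update is not atomic, so each
 * snapshot is taken inside a u64_stats seqcount section and re-read if the
 * writer incremented the sequence in the meantime.  On 64-bit kernels the
 * begin/retry helpers compile away and each loop body runs exactly once.
 */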
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going to up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl_promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl_allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl_vid = vid;
	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}
static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl_vid = vid;
	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}
}
static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of cpus equals the number of
	 * queue pairs, we let each queue pair be private to one cpu by
	 * setting the affinity hint to eliminate the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}
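/*
 * Illustrative example (not from the original source): on a 4-cpu guest with
 * max_queue_pairs == 4, the loop above pins rq[0]/sq[0] to cpu0 ...
 * rq[3]/sq[3] to cpu3 and installs matching XPS maps, so each cpu transmits
 * on its own queue pair and completions interrupt the same cpu.  With any
 * other cpu/queue ratio the hints are simply cleared instead.
 */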
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}
static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}
static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi, cpu);
	return 0;
}
static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}
static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}
static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}
static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded
	 * also when XDP is loaded all RX queues have XDP programs so we only
	 * need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}
static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}
/* Check if the user is trying to change anything besides speed/duplex */
static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	/* cmd is always set so we need to clear it, validate the port type
	 * and also without autonegotiation we can ignore advertising
	 */
	ethtool_cmd_speed_set(&diff1, 0);
	diff2.port = PORT_OTHER;
	diff1.advertising = 0;
	diff1.duplex = 0;
	diff1.cmd = 0;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}
static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	/* don't allow custom speed and duplex */
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !virtnet_validate_ethtool_cmd(cmd))
		return -EINVAL;
	vi->speed = speed;
	vi->duplex = cmd->duplex;

	return 0;
}
static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, vi->speed);
	cmd->duplex = vi->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}
static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}
static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_settings = virtnet_get_settings,
	.set_settings = virtnet_set_settings,
};
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
	struct virtnet_info *vi = netdev_priv(dev);
	struct bpf_prog *old_prog;
	u16 xdp_qp = 0, curr_qp;
	int i, err;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6)) {
		netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
		return -EOPNOTSUPP;
	}

	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
		netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
		return -EINVAL;
	}

	if (dev->mtu > max_sz) {
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
		return -EINVAL;
	}

	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
	if (prog)
		xdp_qp = nr_cpu_ids;

	/* XDP requires extra queues for XDP_TX */
	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
		netdev_warn(dev, "request %i queues but max is %i\n",
			    curr_qp + xdp_qp, vi->max_queue_pairs);
		return -ENOMEM;
	}

	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err) {
		dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
		return err;
	}

	if (prog) {
		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
		if (IS_ERR(prog)) {
			virtnet_set_queues(vi, curr_qp);
			return PTR_ERR(prog);
		}
	}

	vi->xdp_queue_pairs = xdp_qp;
	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	return 0;
}
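/*
 * Queue accounting example (illustrative, not from the original source):
 * on a guest with max_queue_pairs == 8, curr_queue_pairs == 4 and 4 possible
 * cpus, loading a program requests 4 + 4 queue pairs; the extra nr_cpu_ids
 * TX queues let each cpu do XDP_TX without locking.  If the device exposed
 * only 6 pairs, the request would fail with -ENOMEM before any state changed.
 */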
static bool virtnet_xdp_query(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (vi->rq[i].xdp_prog)
			return true;
	}
	return false;
}
static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return virtnet_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = virtnet_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= virtnet_busy_poll,
#endif
	.ndo_xdp		= virtnet_xdp,
};
static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
		return;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
}
static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}
static void virtnet_free_queues(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		napi_hash_del(&vi->rq[i].napi);
		netif_napi_del(&vi->rq[i].napi);
	}

	/* We called napi_hash_del() before netif_napi_del(),
	 * we need to respect an RCU grace period before freeing vi->rq
	 */
	synchronize_net();

	kfree(vi->rq);
	kfree(vi->sq);
}
static void free_receive_bufs(struct virtnet_info *vi)
{
	struct bpf_prog *old_prog;
	int i;

	rtnl_lock();
	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
	}
	rtnl_unlock();
}
static void free_receive_page_frags(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++)
		if (vi->rq[i].alloc_frag.page)
			put_page(vi->rq[i].alloc_frag.page);
}
static bool is_xdp_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (!is_xdp_queue(vi, i))
				dev_kfree_skb(buf);
			else
				put_page(virt_to_head_page(buf));
		}
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs) {
				unsigned long ctx = (unsigned long)buf;
				void *base = mergeable_ctx_to_buf_address(ctx);
				put_page(virt_to_head_page(base));
			} else if (vi->big_packets) {
				give_pages(&vi->rq[i], buf);
			} else {
				dev_kfree_skb(buf);
			}
		}
	}
}
static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}
static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}
static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}
#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
		struct rx_queue_attribute *attribute, char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
}
static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif
static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}
#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU
static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	for_each_possible_cpu(i) {
		struct virtnet_stats *virtnet_stats;
		virtnet_stats = per_cpu_ptr(vi->stats, i);
		u64_stats_init(&virtnet_stats->tx_syncp);
		u64_stats_init(&virtnet_stats->rx_syncp);
	}

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
		} else {
			dev->mtu = mtu;
			dev->max_mtu = mtu;
		}
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	vi->vdev->config->reset(vdev);

	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}
#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++)
			napi_disable(&vi->rq[i].napi);
	}

	remove_vq_common(vi);

	return 0;
}
*vdev
)
2375 struct virtnet_info
*vi
= vdev
->priv
;
2382 virtio_device_ready(vdev
);
2384 if (netif_running(vi
->dev
)) {
2385 for (i
= 0; i
< vi
->curr_queue_pairs
; i
++)
2386 if (!try_fill_recv(vi
, &vi
->rq
[i
], GFP_KERNEL
))
2387 schedule_delayed_work(&vi
->refill
, 0);
2389 for (i
= 0; i
< vi
->max_queue_pairs
; i
++)
2390 virtnet_napi_enable(&vi
->rq
[i
]);
2393 netif_device_attach(vi
->dev
);
2396 virtnet_set_queues(vi
, vi
->curr_queue_pairs
);
2399 err
= virtnet_cpu_notif_add(vi
);
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};
static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
	unregister_virtio_driver(&virtio_net_driver);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");