/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* Per-cpu variable to show the mapping from CPU to virtqueue */
	int __percpu *vq_index;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16 byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}
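
/* Attach one page fragment to the skb, consuming at most
 * PAGE_SIZE - offset bytes of *len and updating the skb accounting.
 */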
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	*len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible.  This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}
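
/* A mergeable-buffer packet may span several buffers; pull the remaining
 * num_buffers - 1 buffers off the virtqueue and attach them as fragments.
 */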
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(rq->vq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--rq->num;
	}
	return 0;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(rq, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(rq, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}
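
/* The three receive-buffer strategies: small packets use one skb per
 * buffer, big packets use a chain of pages, and mergeable buffers use a
 * single page each, which the host merges across buffers as needed.
 */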
static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(rq, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
	if (err < 0)
		give_pages(rq, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}
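
/* Workqueue handler used when the receive ring could not be filled
 * inline (e.g. a GFP_ATOMIC allocation failed); it may sleep, so it
 * refills with GFP_KERNEL.
 */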
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}
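
/* NAPI poll handler: drain up to @budget received buffers, refill the
 * ring when it drops below half full, and re-enable callbacks with a
 * final race check before completing.
 */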
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int r, len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete(napi);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}
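
/* Reclaim skbs the host has finished transmitting and update tx stats. */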
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}
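
/* Fill in the virtio_net header and add the skb to the send queue.
 * If the device accepts any header/data layout and there is headroom,
 * the header is pushed into the skb itself, saving a scatterlist entry.
 */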
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
	if (vi->mergeable_rx_bufs)
		hdr_len = sizeof hdr->mhdr;
	else
		hdr_len = sizeof hdr->hdr;

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	if (vi->mergeable_rx_bufs)
		hdr->mhdr.num_buffers = 0;

	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(sq->vq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

/*
 * Send a command via the control virtqueue and check its status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out,
				 struct scatterlist *in)
{
	struct scatterlist *sgs[4], hdr, stat;
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned out_num = 0, in_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	ctrl.class = class;
	ctrl.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;
	if (in)
		sgs[out_num + in_num++] = in;

	/* Add return status. */
	sg_init_one(&stat, &status, sizeof(status));
	sgs[out_num + in_num++] = &stat;

	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
	       < 0);

	virtqueue_kick(vi->cvq);

	/* Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr = p;
	struct scatterlist sg;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
					  &sg, NULL)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			return -EINVAL;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  addr->sa_data, dev->addr_len);
	}

	eth_commit_mac_addr_change(dev, p);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device comes up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}
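
/* Drop any virtqueue affinity hints and rebuild the per-cpu CPU ->
 * queue mapping, skipping the CPU (if any) that is going offline.
 */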
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;
	int cpu;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		if (cpu == hcpu) {
			*per_cpu_ptr(vi->vq_index, cpu) = -1;
		} else {
			*per_cpu_ptr(vi->vq_index, cpu) =
				++i % vi->curr_queue_pairs;
		}
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of CPUs equals the number of
	 * queue pairs, we make each queue pair private to one CPU by
	 * setting the affinity hint, eliminating the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		*per_cpu_ptr(vi->vq_index, cpu) = i;
		i++;
	}

	vi->affinity_hint_set = true;
}

static int virtnet_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	mutex_lock(&vi->config_lock);

	if (!vi->config_enable)
		goto done;

	switch(action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}

done:
	mutex_unlock(&vi->config_lock);
	return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* To avoid contending for a lock held by a vcpu that might exit to the
 * host, select the txq based on the processor id.
 */
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	int txq;
	struct virtnet_info *vi = netdev_priv(dev);

	if (skb_rx_queue_recorded(skb)) {
		txq = skb_get_rx_queue(skb);
	} else {
		txq = *__this_cpu_ptr(vi->vq_index);
		if (txq == -1)
			txq = 0;
	}

	while (unlikely(txq >= dev->real_num_tx_queues))
		txq -= dev->real_num_tx_queues;

	return txq;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
	.ndo_select_queue    = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}
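
/* Detach and free buffers still owned by the device; callers run this
 * after a device reset (or before the device is started), when the
 * virtqueues are quiescent.
 */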
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs || vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
				offsetof(struct virtio_net_config,
					 max_virtqueue_pairs), &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	vi->vq_index = alloc_percpu(int);
	if (vi->vq_index == NULL)
		goto free_stats;

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_index;

	netif_set_real_num_tx_queues(dev, 1);
	netif_set_real_num_rx_queues(dev, 1);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
free_index:
	free_percpu(vi->vq_index);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	free_percpu(vi->vq_index);
	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->curr_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");