/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

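/* These can be tuned at module load time, e.g. (hypothetical invocation):
 *	modprobe virtio_net napi_weight=64 csum=0 gso=0 */
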
/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* The skb we couldn't send because buffers were full. */
	struct sk_buff *last_xmit_skb;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

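/* Both virtio_net header layouts live in the skb's control buffer (skb->cb),
 * so no separate allocation is needed for the per-packet header. */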
static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}

static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
	skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
	 * queued, start_xmit won't be called. */
	tasklet_schedule(&vi->tasklet);
}

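/* Turn a buffer the host has filled back into a proper skb: check the
 * length, reassemble fragments (with mergeable rx buffers the remaining
 * chunks are pulled from the virtqueue here), then apply the checksum and
 * GSO hints from the virtio_net header before handing it to the stack. */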
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;
	int i;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}

	if (vi->mergeable_rx_bufs) {
		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
		unsigned int copy;
		char *p = page_address(skb_shinfo(skb)->frags[0].page);

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

		memcpy(hdr, p, sizeof(*mhdr));
		p += sizeof(*mhdr);

		copy = len;
		if (copy > skb_tailroom(skb))
			copy = skb_tailroom(skb);

		memcpy(skb_put(skb, copy), p, copy);

		len -= copy;

		if (!len) {
			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
			skb_shinfo(skb)->nr_frags--;
		} else {
			skb_shinfo(skb)->frags[0].page_offset +=
				sizeof(*mhdr) + copy;
			skb_shinfo(skb)->frags[0].size = len;
			skb->data_len += len;
			skb->len += len;
		}

		while (--mhdr->num_buffers) {
			struct sk_buff *nskb;

			i = skb_shinfo(skb)->nr_frags;
			if (i >= MAX_SKB_FRAGS) {
				pr_debug("%s: packet too long %d\n", dev->name,
					 len);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
			if (!nskb) {
				pr_debug("%s: rx error: %d buffers missing\n",
					 dev->name, mhdr->num_buffers);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			__skb_unlink(nskb, &vi->recv);
			vi->num--;

			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
			skb_shinfo(nskb)->nr_frags = 0;
			kfree_skb(nskb);

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;

			skb_shinfo(skb)->frags[i].size = len;
			skb_shinfo(skb)->nr_frags++;
			skb->data_len += len;
			skb->len += len;
		}
	} else {
		len -= sizeof(struct virtio_net_hdr);

		if (len <= MAX_PACKET_LEN)
			trim_pages(vi, skb);

		err = pskb_trim(skb, len);
		if (err) {
			pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
				 len, err);
			dev->stats.rx_dropped++;
			goto drop;
		}
	}

	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start,
					  hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}

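/* Fill the receive ring with worst-case buffers: each skb gets a header
 * slot plus, when big_packets is set, a full page per fragment slot, so
 * even a maximum-size GSO packet from the host fits in one buffer. */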
static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	for (;;) {
		struct virtio_net_hdr *hdr;

		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);

		hdr = skb_vnet_hdr(skb);
		sg_set_buf(sg, hdr, sizeof(*hdr));

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, GFP_ATOMIC);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			trim_pages(vi, skb);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

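/* With mergeable rx buffers the host splits large packets across buffers
 * itself, so each buffer posted to the ring is just one page; the small
 * linear area (GOOD_COPY_LEN) is where receive_skb later copies the
 * header and start of data out of that page. */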
static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[1];
	int err;

	if (!vi->mergeable_rx_bufs) {
		try_fill_recv_maxbufs(vi);
		return;
	}

	for (;;) {
		skb_frag_t *f;

		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
		if (unlikely(!skb))
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		f = &skb_shinfo(skb)->frags[0];
		f->page = get_a_page(vi, GFP_ATOMIC);
		if (!f->page) {
			kfree_skb(skb);
			break;
		}

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		skb_shinfo(skb)->nr_frags++;

		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

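/* NAPI poll: drain completed receive buffers up to the budget, refill the
 * ring once it falls below half of its high-water mark, and only re-enable
 * interrupts when we run out of packets (re-polling if one raced in). */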
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

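/* Fill in a virtio_net header from the skb's checksum and GSO state, then
 * post header + data to the send virtqueue. Returns add_buf's result:
 * 0 on success (also arming the fallback free timer if the transport
 * won't notify on empty), non-zero if the ring had no room. */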
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	mhdr->num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, mhdr, sizeof(*mhdr));
	else
		sg_set_buf(sg, hdr, sizeof(*hdr));

	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}

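/* Runs from the tasklet scheduled in skb_xmit_done: retry last_xmit_skb
 * (start_xmit alone would never resend it if no new packets arrive) and,
 * when the transport notifies on empty, free completed tx skbs here. */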
static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
		vi->svq->vq_ops->kick(vi->svq);
		vi->last_xmit_skb = NULL;
	}
	if (vi->free_in_tasklet)
		free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}

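/* Queue one skb for transmission. A leftover skb from a previous attempt
 * is sent first; if the new skb doesn't fit either, it is parked in
 * last_xmit_skb and the queue is stopped until the host consumes buffers. */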
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* If we have a buffer left over from last time, send it now. */
	if (unlikely(vi->last_xmit_skb) &&
	    xmit_skb(vi, vi->last_xmit_skb) != 0)
		goto stop_queue;

	vi->last_xmit_skb = NULL;

	/* Put new one in send queue and do transmit */
	if (likely(skb)) {
		__skb_queue_head(&vi->send, skb);
		if (xmit_skb(vi, skb) != 0) {
			vi->last_xmit_skb = skb;
			skb = NULL;
			goto stop_queue;
		}
	}
done:
	vi->svq->vq_ops->kick(vi->svq);
	return NETDEV_TX_OK;

stop_queue:
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}
	if (skb) {
		/* Drop this skb: we only queue one. */
		vi->dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	goto done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open = virtnet_open,
	.ndo_stop = virtnet_close,
	.ndo_start_xmit = start_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = virtnet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

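/* Mirror the host's link state into the net device: read the status word
 * from config space (if VIRTIO_NET_F_STATUS was negotiated) and toggle
 * carrier and queue state to match. */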
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

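/* Probe: allocate the net device, adopt whatever checksum/GSO features the
 * host offers, pick a MAC (from config space if given, else random), find
 * the receive and send virtqueues, and prime the receive ring. */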
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
	if (IS_ERR(vi->rvq)) {
		err = PTR_ERR(vi->rvq);
		goto free;
	}

	vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
	if (IS_ERR(vi->svq)) {
		err = PTR_ERR(vi->svq);
		goto free_recv;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_send;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_send:
	vdev->config->del_vq(vi->svq);
free_recv:
	vdev->config->del_vq(vi->rvq);
free:
	free_netdev(dev);
	return err;
}

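/* Teardown mirrors probe: reset the device first so the host stops using
 * our buffers, then free pending skbs, the virtqueues, and cached pages. */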
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	vdev->config->del_vq(vi->svq);
	vdev->config->del_vq(vi->rvq);
	unregister_netdev(vi->dev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

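/* Feature bits we are willing to negotiate; the virtio core drops any the
 * host does not also offer. */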
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS,
	VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");