drivers/net/virtio_net.c
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

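/*
 * Module parameters; all use mode 0444, so they are read-only once loaded
 * and can only be set on the modprobe command line, e.g. (hypothetical
 * values, shown only for illustration):
 *	modprobe virtio_net napi_weight=64 csum=0 gso=0
 */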
static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2

struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Chain pages by the private ptr. */
        struct page *pages;
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

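/*
 * The per-packet virtio header is stashed in the skb's control buffer
 * (skb->cb), which belongs to whoever currently owns the skb, so no
 * separate per-packet allocation is needed.
 */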
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

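/*
 * Private page pool: pages are chained through page->private (unused while
 * we own the page), so returning and reusing a page costs no allocation.
 */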
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
        page->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
        unsigned int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                give_a_page(vi, skb_shinfo(skb)->frags[i].page);
        skb_shinfo(skb)->nr_frags = 0;
        skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p)
                vi->pages = (struct page *)p->private;
        else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

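/*
 * Reassemble a received packet.  With mergeable rx buffers the device may
 * have split one packet across several page-sized buffers; num_buffers in
 * the header says how many, and we pull the extra buffers off the receive
 * queue and attach their pages as fragments of the first skb.
 */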
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        int err;
        int i;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }

        if (vi->mergeable_rx_bufs) {
                unsigned int copy;
                char *p = page_address(skb_shinfo(skb)->frags[0].page);

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;
                len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

                memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
                p += sizeof(hdr->mhdr);

                copy = len;
                if (copy > skb_tailroom(skb))
                        copy = skb_tailroom(skb);

                memcpy(skb_put(skb, copy), p, copy);

                len -= copy;

                if (!len) {
                        give_a_page(vi, skb_shinfo(skb)->frags[0].page);
                        skb_shinfo(skb)->nr_frags--;
                } else {
                        skb_shinfo(skb)->frags[0].page_offset +=
                                sizeof(hdr->mhdr) + copy;
                        skb_shinfo(skb)->frags[0].size = len;
                        skb->data_len += len;
                        skb->len += len;
                }

                while (--hdr->mhdr.num_buffers) {
                        struct sk_buff *nskb;

                        i = skb_shinfo(skb)->nr_frags;
                        if (i >= MAX_SKB_FRAGS) {
                                pr_debug("%s: packet too long %d\n", dev->name,
                                         len);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
                        if (!nskb) {
                                pr_debug("%s: rx error: %d buffers missing\n",
                                         dev->name, hdr->mhdr.num_buffers);
                                dev->stats.rx_length_errors++;
                                goto drop;
                        }

                        __skb_unlink(nskb, &vi->recv);
                        vi->num--;

                        skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
                        skb_shinfo(nskb)->nr_frags = 0;
                        kfree_skb(nskb);

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;

                        skb_shinfo(skb)->frags[i].size = len;
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += len;
                        skb->len += len;
                }
        } else {
                len -= sizeof(hdr->hdr);

                if (len <= MAX_PACKET_LEN)
                        trim_pages(vi, skb);

                err = pskb_trim(skb, len);
                if (err) {
                        pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
                                 len, err);
                        dev->stats.rx_dropped++;
                        goto drop;
                }
        }

        skb->truesize += skb->data_len;
        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}

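/*
 * Post receive buffers in the "big packets" / legacy layout: each buffer is
 * [virtio_net_hdr][linear skb data][up to MAX_SKB_FRAGS whole pages],
 * described to the device as a scatterlist of 2 + MAX_SKB_FRAGS entries.
 */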
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err, i;
        bool oom = false;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        do {
                struct skb_vnet_hdr *hdr;

                skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                skb_put(skb, MAX_PACKET_LEN);

                hdr = skb_vnet_hdr(skb);
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

                if (vi->big_packets) {
                        for (i = 0; i < MAX_SKB_FRAGS; i++) {
                                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                                f->page = get_a_page(vi, gfp);
                                if (!f->page)
                                        break;

                                f->page_offset = 0;
                                f->size = PAGE_SIZE;

                                skb->data_len += PAGE_SIZE;
                                skb->len += PAGE_SIZE;

                                skb_shinfo(skb)->nr_frags++;
                        }
                }

                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        trim_pages(vi, skb);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err >= num);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct scatterlist sg[1];
        int err;
        bool oom = false;

        if (!vi->mergeable_rx_bufs)
                return try_fill_recv_maxbufs(vi, gfp);

        do {
                skb_frag_t *f;

                skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
                if (unlikely(!skb)) {
                        oom = true;
                        break;
                }

                f = &skb_shinfo(skb)->frags[0];
                f->page = get_a_page(vi, gfp);
                if (!f->page) {
                        oom = true;
                        kfree_skb(skb);
                        break;
                }

                f->page_offset = 0;
                f->size = PAGE_SIZE;

                skb_shinfo(skb)->nr_frags++;

                sg_init_one(sg, page_address(f->page), PAGE_SIZE);
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
                if (err < 0) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        try_fill_recv(vi, GFP_KERNEL);
        still_empty = (vi->num == 0);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in,
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}

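/*
 * NAPI poll: drain up to @budget received buffers, refill the ring once it
 * runs below half full, then re-enable interrupts.  enable_cb() returning
 * false means more buffers arrived while interrupts were off, so we race
 * back into polling rather than lose them.
 */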
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
                    napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
        return tot_sgs;
}

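/*
 * Fill in the virtio_net header from the skb's checksum/GSO state and queue
 * header + packet as a single scatterlist on the send virtqueue.  Returns
 * add_buf()'s result, which start_xmit below treats as the remaining ring
 * capacity (negative on error/full ring).
 */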
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
        else
                sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));

        hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
        return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

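/*
 * Transmit path: reclaim completed buffers, queue the new skb, then stop
 * the queue early once fewer than 2+MAX_SKB_FRAGS descriptors remain, so
 * the next worst-case packet is guaranteed to fit without TX_BUSY.
 */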
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                netif_stop_queue(dev);
                dev_warn(&dev->dev, "Unexpected full queue\n");
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        vi->svq->vq_ops->disable_cb(vi->svq);
                        netif_start_queue(dev);
                        goto again;
                }
                return NETDEV_TX_BUSY;
        }
        vi->svq->vq_ops->kick(vi->svq);

        /*
         * Put new one in send queue.  You'd expect we'd need this before
         * xmit_skb calls add_buf(), since the callback can be triggered
         * immediately after that.  But since the callback just triggers
         * another call back here, normal network xmit locking prevents the
         * race.
         */
        __skb_queue_head(&vi->send, skb);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                vi->svq->vq_ops->disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED. */
        if (napi_schedule_prep(&vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __napi_schedule(&vi->napi);
        }
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
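/*
 * The command is laid out as a single descriptor chain:
 *   [ctrl header (class, cmd)] [caller's sg entries] [status byte written
 *   back by the device], which is why "out" and "in" are each bumped by one.
 */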
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
               (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

        vi->cvq->vq_ops->kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);

        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

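/*
 * Push the interface's rx filter state to the host: the promiscuous and
 * allmulti flags first (a one-byte command each), then the unicast and
 * multicast MAC tables as a single two-entry scatterlist.
 */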
static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        /* MAC filter - use one buffer for both lists */
        mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
                                 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = dev->uc.count;
        i = 0;
        list_for_each_entry(ha, &dev->uc.list, list)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[dev->uc.count][0];

        mac_data->entries = dev->mc_count;
        addr = dev->mc_list;
        for (i = 0; i < dev->mc_count; i++, addr = addr->next)
                memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .set_tso = ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .get_link = ethtool_op_get_link,
};

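/* 68 is the smallest MTU IPv4 requires a link to support (RFC 791);
 * 65535 is the largest possible IP packet. */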
#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
        u16 v;

        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
                return;

        vi->vdev->config->get(vi->vdev,
                              offsetof(struct virtio_net_config, status),
                              &v, sizeof(v));

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                return;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_update_status(vi);
}

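/*
 * Probe: allocate the netdev, negotiate device features into dev->features,
 * find the two (or three, with a control queue) virtqueues, register the
 * device, and prime the receive ring before reporting success.
 */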
static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
        const char *names[] = { "input", "output", "control" };
        int nvqs;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        INIT_DELAYED_WORK(&vi->refill, refill_work);

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
        if (err)
                goto free;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        dev->features |= NETIF_F_HW_VLAN_FILTER;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        vi->status = VIRTIO_NET_S_LINK_UP;
        virtnet_update_status(vi);
        netif_carrier_on(dev);

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
        cancel_delayed_work_sync(&vi->refill);
free_vqs:
        vdev->config->del_vqs(vdev);
free:
        free_netdev(dev);
        return err;
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        unregister_netdev(vi->dev);
        cancel_delayed_work_sync(&vi->refill);

        vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);

        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtnet_probe,
        .remove = __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");