/* drivers/net/tap.c */
#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}
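
/*
 * Editor's note on the precedence above: TAP_VNET_LE (set via
 * TUNSETVNETLE) forces little-endian vnet headers regardless of host
 * byte order; otherwise the legacy virtio rule applies, which
 * TAP_VNET_BE can flip to big-endian when CONFIG_TUN_VNET_CROSS_LE is
 * enabled. So, for example, a big-endian host serving a virtio 1.0
 * (always little-endian) guest would set TUNSETVNETLE.
 */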

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->tap becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}
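
/*
 * Bookkeeping note: numvtaps counts *enabled* queues and indexes the
 * dense taps[] array used on the receive fast path, while numqueues
 * counts every queue attached via tap_set_queue() (enabled or not) and
 * is bounded by MAX_TAP_QUEUES. A queue disabled via IFF_DETACH_QUEUE
 * leaves numqueues unchanged but drops out of taps[]/numvtaps.
 */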

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;
		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}
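
/*
 * The removal above is the classic O(1) "swap with last" trick: the
 * last enabled queue is moved into the vacated slot so that
 * taps[0..numvtaps-1] stays dense, which is what tap_get_queue()'s
 * modulo lookup relies on. E.g. disabling index 1 of [q0, q1, q2]
 * yields [q0, q2].
 */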

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the skb's flow hash. If there is no hash,
 * fall back to the rx queue recorded on the skb. If all fails, use
 * the first queue. Cache tap->numvtaps since it can become zero
 * during the execution of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}
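
/*
 * Worked example of the selection above: with numvtaps == 4 and a flow
 * hash of 0x1234567a, the packet lands on taps[0x1234567a % 4]. The
 * rx-queue fallback uses repeated subtraction rather than '%',
 * presumably because the recorded queue index is normally already
 * below (or barely above) numvtaps.
 */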

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);

static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);
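
/*
 * Note that idr_alloc() above starts at 1, never 0, so a tap->minor of
 * 0 can safely mean "no minor assigned"; tap_free_minor() below relies
 * on that sentinel when deciding whether there is anything to remove.
 */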

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM's virtio_net uses tap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zero copy.
	 *
	 * macvlan supports zero copy iff the lower device supports it,
	 * so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing skb_array */
		goto err_put;
	}

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;

	tap_put_queue(q);
	return 0;
}

static unsigned int tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
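
/*
 * Worked example for tap_alloc_skb(): on a system with 4K pages, a
 * 1500 byte frame fits under PAGE_SIZE, so linear is bumped to len and
 * the whole frame lives in the skb head. A 64K GSO frame with
 * linear == 128 instead gets a 128 byte head plus roughly 65K of paged
 * data (len - linear).
 */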

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (m && m->msg_control) {
		struct ubuf_info *uarg = m->msg_control;

		uarg->callback(uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true,
					    vlan_hlen))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}
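
/*
 * Layout produced above for a VLAN-tagged frame: vlan_offset is the
 * offset of h_vlan_proto in struct vlan_ethhdr (12, i.e. the two MAC
 * addresses), so userspace sees dst/src MAC, then the reinserted
 * 4-byte 802.1Q tag from 'veth', then the rest of the frame.
 */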

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		if (skb)
			kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}
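
/*
 * The loop above is the standard prepare_to_wait()/schedule()/
 * finish_wait() idiom: the task is queued on sk_sleep(&q->sk) *before*
 * the ring is re-checked, so a producer's wake_up in tap_handle_frame()
 * cannot slip in between the empty check and the sleep.
 */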

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}
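
/*
 * Example of the inversion described above: TUNSETOFFLOAD with
 * TUN_F_CSUM | TUN_F_TSO4 declares that userspace can consume TSO
 * frames, so GRO/LRO stay enabled on the lower device and large
 * aggregated frames may be handed up; clearing the TSO bits instead
 * strips RX_OFFLOADS so userspace only ever sees MTU-sized frames.
 */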

/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = tap->dev->type;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(tap->dev, &sa);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}
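
/*
 * A minimal userspace sketch of driving the ioctls above (the device
 * path is hypothetical and depends on the allocated minor; error
 * handling omitted):
 *
 *	int fd = open("/dev/tap42", O_RDWR);	// 42 = allocated minor
 *	int hdr_sz = sizeof(struct virtio_net_hdr);
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);	// prepend vnet headers
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 *	// then read()/write() exchange Ethernet frames with the device
 */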

#ifdef CONFIG_COMPAT
static long tap_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tap_compat_ioctl,
#endif
};

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);

	return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		if (skb)
			kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tap file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);
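
/*
 * tap_get_socket() and tap_get_skb_array() below exist for in-kernel
 * consumers (vhost-net being the notable one) that want to move frames
 * directly, without bouncing through the file read/write path.
 */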

struct skb_array *tap_get_skb_array(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->skb_array;
}
EXPORT_SYMBOL_GPL(tap_get_skb_array);

int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct skb_array **arrays;
	int n = tap->numqueues;
	int ret, i = 0;

	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		arrays[i++] = &q->skb_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);
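
/*
 * A minimal sketch of how a backend driver (macvtap/ipvtap style) is
 * expected to pair these calls; the names here are hypothetical:
 *
 *	static struct cdev my_tap_cdev;
 *	static dev_t my_tap_major;
 *
 *	err = tap_create_cdev(&my_tap_cdev, &my_tap_major, "mytap",
 *			      THIS_MODULE);
 *	// per device: tap_get_minor(my_tap_major, tap);
 *	// on device teardown: tap_free_minor(my_tap_major, tap);
 *	tap_destroy_cdev(my_tap_major, &my_tap_cdev);
 */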

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");