/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>

#include <asm/uaccess.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};
/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
 * for the netdevice fit in one page, which makes the memory allocation
 * reliable.  TODO: increase the limit. */
#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 * tap_filter are kept in tun_struct since they are used for filtering on the
 * netdevice, not on a specific queue (at least I didn't see a requirement for
 * per-queue filtering).
 *
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct net *net;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct list_head next;
	struct tun_struct *detached;
};
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
/* Since the socket was moved into tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
};
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}
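
/* Illustrative note (not in the original source): 0x3ff is
 * TUN_NUM_FLOW_ENTRIES - 1, so the function keeps the low 10 bits of the
 * hash; e.g. rxhash 0x12345678 & 0x3ff = 0x278 selects bucket 632.
 */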
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		tun->flow_count++;
	}
	return e;
}
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	sock_rps_reset_flow_hash(e->rps_rxhash);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	tun->flow_count--;
}
static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small chance of out-of-order delivery while a flow
	 * switches queues; not worth optimizing. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash)) {
		sock_rps_reset_flow_hash(e->rps_rxhash);
		e->rps_rxhash = hash;
	}
}
/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is that some cards (e.g. the 82599) choose
 * the rxq based on the txq where the last packet of the flow was sent.
 * As the userspace application moves between processors, we may get a
 * different rxq no. here. If we cannot get an rxhash, then we hope the
 * rxq no. may help.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = ACCESS_ONCE(tun->numqueues);

	txq = skb_get_hash(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
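
/* Illustrative note (not in the original source) on the multiply-and-shift
 * in tun_select_queue() above: treating the 32-bit hash as a fraction of
 * 2^32, ((u64)hash * numqueues) >> 32 maps it uniformly onto
 * 0..numqueues-1.  E.g. hash 0xC0000000 (3/4 of the range) with 4 queues
 * yields (0xC0000000ULL * 4) >> 32 = 3.
 */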
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}
static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}
static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
static void tun_queue_purge(struct tun_file *tfile)
{
	skb_queue_purge(&tfile->sk.sk_receive_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & TUN_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}

		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
				 &tfile->socket.flags));
		sk_release_kernel(&tfile->sk);
	}
}
static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & TUN_PERSIST)
		module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
	struct tun_file *tfile = file->private_data;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (err)
			goto out;
	}
	tfile->queue_index = tun->numqueues;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached)
		tun_enable_queue(tfile);
	else
		sock_hold(&tfile->sk);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static struct tun_struct *tun_get(struct file *file)
{
	return __tun_get(file->private_data);
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
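
/* Illustrative note (not in the original source): ether_crc() yields a
 * 32-bit CRC; the top six bits (>> 26) pick one of 64 hash positions,
 * stored as one bit across the two u32 words of mask[].  E.g. n = 37
 * sets bit 5 (37 & 31) of mask[1] (37 >> 5).
 */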
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = kmalloc(alen, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
		err = -EFAULT;
		goto done;
	}

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto done;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

done:
	kfree(addr);
	return err;
}
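
#if 0	/* Illustrative userspace sketch, not part of the driver: configure
	 * the TAP filter above via TUNSETTXFILTER.  Error handling omitted;
	 * the buffer is sized for up to 8 addresses, matching
	 * FLT_EXACT_COUNT in this file. */
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_exact_filter(int tapfd, const unsigned char (*macs)[ETH_ALEN],
			    unsigned count)
{
	/* struct tun_filter is followed by count MAC addresses */
	char buf[sizeof(struct tun_filter) + 8 * ETH_ALEN];
	struct tun_filter *uf = (struct tun_filter *)buf;

	uf->flags = 0;			/* or TUN_FLT_ALLMULTI */
	uf->count = count;
	memcpy(uf->addr, macs, count * ETH_ALEN);
	return ioctl(tapfd, TUNSETTXFILTER, uf);
}
#endif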
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	u32 numqueues = 0;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);
	numqueues = ACCESS_ONCE(tun->numqueues);

	/* Drop packet if interface is not attached */
	if (txq >= numqueues)
		goto drop;

	if (numqueues == 1) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = skb_get_hash(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	/* Limit the number of packets queued by dividing txq length with the
	 * number of queues.
	 */
	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
			  >= dev->tx_queue_len)
		goto drop;

	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;

	if (skb->sk) {
		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
		sw_tx_timestamp(skb);
	}

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	/* Enqueue packet */
	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
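
/* Worked example (not in the original source) for the backlog cap in
 * tun_net_xmit() above: with dev->tx_queue_len == 500 and numqueues == 4,
 * each per-queue socket receive queue is effectively limited to
 * 500 / 4 = 125 packets before new frames are dropped.
 */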
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

#define MIN_MTU 68
#define MAX_MTU 65535
static int
tun_net_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * Since both of those are synchronous operations, we are guaranteed
	 * never to have pending data when we poll for it
	 * so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};
static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;

	case TUN_TAP_DEV:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;
	}
}
955 static unsigned int tun_chr_poll(struct file
*file
, poll_table
*wait
)
957 struct tun_file
*tfile
= file
->private_data
;
958 struct tun_struct
*tun
= __tun_get(tfile
);
960 unsigned int mask
= 0;
965 sk
= tfile
->socket
.sk
;
967 tun_debug(KERN_INFO
, tun
, "tun_chr_poll\n");
969 poll_wait(file
, sk_sleep(sk
), wait
);
971 if (!skb_queue_empty(&sk
->sk_receive_queue
))
972 mask
|= POLLIN
| POLLRDNORM
;
974 if (sock_writeable(sk
) ||
975 (!test_and_set_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
) &&
977 mask
|= POLLOUT
| POLLWRNORM
;
979 if (tun
->dev
->reg_state
!= NETREG_REGISTERED
)
/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = NET_SKB_PAD, linear;
	struct virtio_net_hdr gso = { 0 };
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash;
	ssize_t n;

	if (!(tun->flags & TUN_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		n = copy_from_iter(&pi, sizeof(pi), from);
		if (n != sizeof(pi))
			return -EFAULT;
	}

	if (tun->flags & TUN_VNET_HDR) {
		if (len < tun->vnet_hdr_sz)
			return -EINVAL;
		len -= tun->vnet_hdr_sz;

		n = copy_from_iter(&gso, sizeof(gso), from);
		if (n != sizeof(gso))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;

		if (gso.hdr_len > len)
			return -EINVAL;
		iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
	}
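
	/* Illustrative note (not in the original source): each write() on
	 * the fd carries one packet, laid out as
	 *   [struct tun_pi]          unless IFF_NO_PI was set, then
	 *   [struct virtio_net_hdr]  only with IFF_VNET_HDR (padded out to
	 *                            tun->vnet_hdr_sz), then
	 *   [packet payload]
	 * which is exactly the order the two blocks above consume.
	 */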
	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (gso.hdr_len > good_linear)
			linear = good_linear;
		else
			linear = gso.hdr_len;
	}

	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EAGAIN)
			tun->dev->stats.rx_dropped++;
		return PTR_ERR(skb);
	}

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && msg_control) {
			struct ubuf_info *uarg = msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err) {
		tun->dev->stats.rx_dropped++;
		kfree_skb(skb);
		return -EFAULT;
	}

	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, gso.csum_start,
					  gso.csum_offset)) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		if (tun->flags & TUN_NO_PI) {
			switch (skb->data[0] & 0xf0) {
			case 0x40:
				pi.proto = htons(ETH_P_IP);
				break;
			case 0x60:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				tun->dev->stats.rx_dropped++;
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	skb_reset_network_header(skb);

	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
		{
			static bool warned;

			if (!warned) {
				warned = true;
				netdev_warn(tun->dev,
					    "%s: using disabled UFO feature; please fix this program\n",
					    current->comm);
			}
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		}
		default:
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = gso.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}

	skb_probe_transport_header(skb, 0);

	rxhash = skb_get_hash(skb);
	netif_rx_ni(skb);

	tun->dev->stats.rx_packets++;
	tun->dev->stats.rx_bytes += len;

	tun_flow_update(tun, rxhash, tfile);
	return total_len;
}
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_struct *tun = tun_get(file);
	struct tun_file *tfile = file->private_data;
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);

	tun_put(tun);
	return result;
}
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (vlan_tx_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & TUN_VNET_HDR)
		vnet_hdr_sz = tun->vnet_hdr_sz;

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & TUN_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso = { 0 }; /* no info leak */
		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			gso.hdr_len = skb_headlen(skb);
			gso.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else {
				pr_err("unexpected GSO type: "
				       "0x%x, gso_size %d, hdr_len %d\n",
				       sinfo->gso_type, gso.gso_size,
				       gso.hdr_len);
				print_hex_dump(KERN_ERR, "tun: ",
					       DUMP_PREFIX_NONE,
					       16, 1, skb->head,
					       min((int)gso.hdr_len, 64), true);
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			gso.csum_start = skb_checksum_start_offset(skb) +
					 vlan_hlen;
			gso.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	tun->dev->stats.tx_packets++;
	tun->dev->stats.tx_bytes += skb->len + vlan_hlen;

	return total;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock)
{
	struct sk_buff *skb;
	ssize_t ret;
	int peeked, err, off = 0;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to))
		return 0;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		return -EIO;

	/* Read frames from queue */
	skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
				  &peeked, &off, &err);
	if (!skb)
		return err;

	ret = tun_put_user(tun, tfile, skb, to);
	if (unlikely(ret < 0))
		kfree_skb(skb);
	else
		consume_skb(skb);

	return ret;
}
static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;

	if (!tun)
		return -EBADFD;
	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	tun_put(tun);
	return ret;
}
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	free_netdev(dev);
}
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return -EINVAL;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	int ret;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);

	if (!tun)
		return -EBADFD;

	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT);
	tun_put(tun);
	return ret;
}
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret;

	if (!tun)
		return -EBADFD;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}

	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;
}
static int tun_release(struct socket *sock)
{
	if (sock->sk)
		sock_put(sock->sk);
	return 0;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
	.release = tun_release,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};
static int tun_flags(struct tun_struct *tun)
{
	int flags = 0;

	if (tun->flags & TUN_TUN_DEV)
		flags |= IFF_TUN;
	else
		flags |= IFF_TAP;

	if (tun->flags & TUN_NO_PI)
		flags |= IFF_NO_PI;

	/* This flag has no real effect. We track the value for backwards
	 * compatibility.
	 */
	if (tun->flags & TUN_ONE_QUEUE)
		flags |= IFF_ONE_QUEUE;

	if (tun->flags & TUN_VNET_HDR)
		flags |= IFF_VNET_HDR;

	if (tun->flags & TUN_TAP_MQ)
		flags |= IFF_MULTI_QUEUE;

	if (tun->flags & TUN_PERSIST)
		flags |= IFF_PERSIST;

	return flags;
}
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & TUN_TAP_MQ))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
		if (err < 0)
			return err;

		if (tun->flags & TUN_TAP_MQ &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queue has already been attached, no need
			 * to initialize the device again.
			 */
			return 0;
		}
	}
	else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_dev;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;

		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
		    device_create_file(&tun->dev->dev, &dev_attr_group))
			pr_err("Failed to create tun sysfs files\n");
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;
	else
		tun->flags &= ~TUN_NO_PI;

	/* This flag has no real effect. We track the value for backwards
	 * compatibility.
	 */
	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;
	else
		tun->flags &= ~TUN_ONE_QUEUE;

	if (ifr->ifr_flags & IFF_VNET_HDR)
		tun->flags |= TUN_VNET_HDR;
	else
		tun->flags &= ~TUN_VNET_HDR;

	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
		tun->flags |= TUN_TAP_MQ;
	else
		tun->flags &= ~TUN_TAP_MQ;

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_dev:
	free_netdev(dev);
	return err;
}
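
#if 0	/* Illustrative userspace sketch, not part of the driver: the
	 * canonical way to reach tun_set_iff() above.  Error handling
	 * omitted; "tap0" is just an example name the kernel may assign. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_tap(char *name /* in/out, IFNAMSIZ bytes */)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	/* or IFF_TUN */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;
	strcpy(name, ifr.ifr_name);	/* kernel fills in e.g. "tap0" */
	return fd;	/* read()/write() now carry frames */
}
#endif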
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	netdev_update_features(tun->dev);

	return 0;
}
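
/* Illustrative userspace call (not in the original source) that lands in
 * set_offload() above, promising the reader can cope with checksum-less
 * and GSO frames:
 *
 *	ioctl(tapfd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 */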
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		sk_detach_filter(tfile->socket.sk);
	}

	tun->filter_attached = false;
}
static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}
static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}
static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

unlock:
	rtnl_unlock();
	return ret;
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	void __user* argp = (void __user*)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int ret;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
				IFF_VNET_HDR | IFF_MULTI_QUEUE,
				(unsigned int __user*)argp);
	} else if (cmd == TUNSETQUEUE)
		return tun_set_queue(file, &ifr);

	ret = 0;
	rtnl_lock();

	tun = __tun_get(tfile);
	if (cmd == TUNSETIFF && !tun) {
		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(tfile->net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module from being unloaded.
		 */
		if (arg && !(tun->flags & TUN_PERSIST)) {
			tun->flags |= TUN_PERSIST;
			__module_get(THIS_MODULE);
		}
		if (!arg && (tun->flags & TUN_PERSIST)) {
			tun->flags &= ~TUN_PERSIST;
			module_put(THIS_MODULE);
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
}
#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}
static int tun_chr_open(struct inode *inode, struct file * file)
{
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto);
	if (!tfile)
		return -ENOMEM;
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->net = get_net(current->nsproxy->net_ns);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);
	sk_change_net(&tfile->sk, tfile->net);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = tfile->net;

	tun_detach(tfile, true);
	put_net(net);

	return 0;
}
#ifdef CONFIG_PROC_FS
static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(f);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read  = new_sync_read,
	.write = new_sync_write,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
/* ethtool interface */

static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported		= 0;
	cmd->advertising	= 0;
	ethtool_cmd_speed_set(cmd, SPEED_10);
	cmd->duplex		= DUPLEX_FULL;
	cmd->port		= PORT_TP;
	cmd->phy_address	= 0;
	cmd->transceiver	= XCVR_INTERNAL;
	cmd->autoneg		= AUTONEG_DISABLE;
	cmd->maxtxpkt		= 0;
	cmd->maxrxpkt		= 0;
	return 0;
}
static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case TUN_TAP_DEV:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}
static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}
static const struct ethtool_ops tun_ethtool_ops = {
	.get_settings	= tun_get_settings,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};
static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	pr_info("%s\n", DRV_COPYRIGHT);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}
	return 0;
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
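
#if 0	/* Illustrative in-kernel sketch, not part of the driver: roughly
	 * how a consumer such as vhost-net uses the export above.
	 * Simplified; a real caller keeps the file reference for the
	 * socket's whole lifetime and does fuller error handling. */
static struct socket *get_tap_socket_example(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	/* on success, packets flow via sock_sendmsg()/sock_recvmsg() */
	return sock;
}
#endif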
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");