/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 *
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};
/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)
struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 * tap_filter were kept in tun_struct since they are used for filtering for the
 * netdevice, not for a specific queue (at least I didn't see the requirement for
 * this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};
/* Since the sockets were moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
};
bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
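/*
 * Sketch (added commentary, not in the original source): the three
 * helpers above implement pointer tagging. TUN_XDP_FLAG marks the low
 * bit of a pointer so that a single ptr_ring can carry both sk_buffs
 * and xdp_frames. A round trip, assuming TUN_XDP_FLAG is the low bit:
 *
 *	struct xdp_frame *xdpf = frame;
 *	void *tagged = tun_xdp_to_ptr(xdpf);	// low bit set
 *	if (tun_is_xdp_frame(tagged))		// true for tagged frames
 *		xdpf = tun_ptr_to_xdp(tagged);	// low bit cleared again
 *
 * This relies on xdp_frame allocations being at least 2-byte aligned,
 * so the low bit of a genuine pointer is always zero.
 */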
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}
static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	tfile->napi_enabled = napi_en;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}

static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */
static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}
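/*
 * Note (added commentary, not in the original source): 0x3ff keeps the
 * low 10 bits of the rxhash, so tun_hashfn() always returns an index
 * in [0, TUN_NUM_FLOW_ENTRIES) because 0x3ff + 1 == 1024. That makes
 * it safe as a direct index into tun->flows[]. A standalone check:
 *
 *	#include <assert.h>
 *	int main(void)
 *	{
 *		assert(0x3ff + 1 == 1024);
 *		assert((0xdeadbeefu & 0x3ff) < 1024);
 *		return 0;
 *	}
 */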
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}
static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}
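/*
 * Design note (added commentary, not in the original source): the
 * lookup above runs locklessly under rcu_read_lock(); tun->lock is
 * only taken on the miss path that may insert a new entry, and the
 * find is repeated under the lock, so two CPUs racing on the same
 * rxhash create at most one tun_flow_entry.
 */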
/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow was
 * transmitted. As the userspace application moves between processors,
 * we may get a different rxq no. here. If we could not get the rxhash,
 * then we would hope the rxq no. may help here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else {
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
		}
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}
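/*
 * Worked example (added commentary, not in the original source) for
 * the multiply-and-shift above: ((u64)txq * numqueues) >> 32 equals
 * floor(txq * numqueues / 2^32), mapping a 32-bit hash onto
 * [0, numqueues) without a division. E.g. with txq = 0x80000000 and
 * numqueues = 4:
 *
 *	(0x80000000ULL * 4) >> 32 == 2
 *
 * i.e. a hash halfway through the 32-bit range lands on queue 2 of 4.
 */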
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}
static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}
static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[n >> 5] & (1 << (n & 31));
}
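/*
 * Sketch (added commentary, not in the original source) of what the
 * pair above computes: ether_crc() returns a 32-bit CRC of the MAC
 * address and ">> 26" keeps the top 6 bits, a value n in [0, 64).
 * mask[2] is then treated as a 64-bit bitmap: word n >> 5, bit n & 31.
 * A standalone userspace analogue:
 *
 *	#include <stdint.h>
 *	static void set_hash_bit(uint32_t mask[2], unsigned int n)
 *	{
 *		mask[n >> 5] |= 1u << (n & 31);	// n must be < 64
 *	}
 *
 * Different addresses may collide on the same bit, so a hit means
 * "maybe in the set" - acceptable for an inexact multicast filter.
 */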
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err = 0, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

free_addr:
	kfree(addr);
	return err;
}
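/*
 * Usage sketch (not from this file; assumes the uapi definitions in
 * <linux/if_tun.h>): userspace installs this filter on a tap fd with
 * the TUNSETTXFILTER ioctl, passing a struct tun_filter followed in
 * memory by uf.count six-byte addresses:
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = { .uf = { .flags = 0, .count = 2 } };
 *	// fill req.addrs[0] and req.addrs[1], then:
 *	// ioctl(tap_fd, TUNSETTXFILTER, &req);
 */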
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;

			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we poll
	 * for it so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			if (tfile->napi_enabled)
				napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
}
#endif
*dev
, int new_hr
)
1190 struct tun_struct
*tun
= netdev_priv(dev
);
1192 if (new_hr
< NET_SKB_PAD
)
1193 new_hr
= NET_SKB_PAD
;
1195 tun
->align
= new_hr
;
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->rx_frame_errors	= rx_frame_errors;
	stats->tx_dropped	= tx_dropped;
}
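/*
 * Note (added commentary, not in the original source): the
 * fetch_begin/fetch_retry pair above is the reader side of a seqcount.
 * On 32-bit kernels a 64-bit counter update is not atomic, so the
 * reader re-copies the u64 counters until the sequence number is
 * unchanged across the copy. The plain u32 counters (rx_dropped and
 * friends) are read outside the loop because single-word reads need
 * no such protection.
 */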
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_NONE))
		return -EINVAL;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into lowest bit for consumer to
		 * distinguish XDP buffers from sk_buffs.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	rcu_read_unlock();
	return cnt - drops;
}
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, 0);
}

static void tun_xdp_flush(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		goto out;

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

out:
	rcu_read_unlock();
}
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_xdp_flush		= tun_xdp_flush,
};
static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}
/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}
*tun_napi_alloc_frags(struct tun_file
*tfile
,
1491 const struct iov_iter
*it
)
1493 struct sk_buff
*skb
;
1498 if (it
->nr_segs
> MAX_SKB_FRAGS
+ 1)
1499 return ERR_PTR(-ENOMEM
);
1502 skb
= napi_get_frags(&tfile
->napi
);
1505 return ERR_PTR(-ENOMEM
);
1507 linear
= iov_iter_single_seg_count(it
);
1508 err
= __skb_grow(skb
, linear
);
1513 skb
->data_len
= len
- linear
;
1514 skb
->truesize
+= skb
->data_len
;
1516 for (i
= 1; i
< it
->nr_segs
; i
++) {
1517 struct page_frag
*pfrag
= ¤t
->task_frag
;
1518 size_t fragsz
= it
->iov
[i
].iov_len
;
1520 if (fragsz
== 0 || fragsz
> PAGE_SIZE
) {
1525 if (!skb_page_frag_refill(fragsz
, pfrag
, GFP_KERNEL
)) {
1530 skb_fill_page_desc(skb
, i
- 1, pfrag
->page
,
1531 pfrag
->offset
, fragsz
);
1532 page_ref_inc(pfrag
->page
);
1533 pfrag
->offset
+= fragsz
;
1538 /* frees skb and all frags allocated with napi_alloc_frag() */
1539 napi_free_frags(&tfile
->napi
);
1540 return ERR_PTR(err
);
/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}
static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	unsigned int delta = 0;
	char *buf;
	size_t copied;
	int err, pad = TUN_RX_PAD;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += TUN_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above, this should be rare and for simplicity
	 * we do XDP on skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog)
		*skb_xdp = 1;
	else
		*skb_xdp = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog && !*skb_xdp) {
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_REDIRECT:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
			if (err)
				goto err_redirect;
			rcu_read_unlock();
			return NULL;
		case XDP_TX:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			if (tun_xdp_tx(tun->dev, &xdp))
				goto err_redirect;
			tun_xdp_flush(tun->dev);
			rcu_read_unlock();
			return NULL;
		case XDP_PASS:
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(tun->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}

	skb = build_skb(buf, buflen);
	if (!skb) {
		rcu_read_unlock();
		return ERR_PTR(-ENOMEM);
	}

	skb_reserve(skb, pad - delta);
	skb_put(skb, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	rcu_read_unlock();

	return skb;

err_redirect:
	put_page(alloc_frag->page);
err_xdp:
	rcu_read_unlock();
	this_cpu_inc(tun->pcpu_stats->rx_dropped);
	return NULL;
}
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tun);

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For the packet that is not easy to be processed
		 * (e.g gso or jumbo packet), we will do it at after
		 * skb was created with generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return -EFAULT;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (!frags)
			skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				return total_len;
			}
		}
		rcu_read_unlock();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * We may get a very small possibility of OOO during switching, not
	 * worth to optimize.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	if (frags) {
		/* Exercise flow dissector code path. */
		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			napi_free_frags(&tfile->napi);
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from,
			      file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return result;
}
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	struct tun_pcpu_stats *stats;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EINVAL;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += ret;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return ret;
}
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context, */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);
	current->state = TASK_INTERRUPTIBLE;

	while (1) {
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}
tun_chr_read_iter(struct kiocb
*iocb
, struct iov_iter
*to
)
2233 struct file
*file
= iocb
->ki_filp
;
2234 struct tun_file
*tfile
= file
->private_data
;
2235 struct tun_struct
*tun
= tun_get(tfile
);
2236 ssize_t len
= iov_iter_count(to
), ret
;
2240 ret
= tun_do_read(tun
, tfile
, to
, file
->f_flags
& O_NONBLOCK
, NULL
);
2241 ret
= min_t(ssize_t
, ret
, len
);
static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	return -EINVAL;
}

static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) + /* OWNER */
	       nla_total_size(sizeof(gid_t)) + /* GROUP */
	       nla_total_size(sizeof(u8)) + /* TYPE */
	       nla_total_size(sizeof(u8)) + /* PI */
	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
	       nla_total_size(sizeof(u8)) + /* PERSIST */
	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
	       0;
}
static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size       = tun_get_size,
	.fill_info      = tun_fill_info,
};
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);

	if (!tun)
		return -EBADFD;

	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}
static int tun_ptr_peek_len(void *ptr)
{
	if (ptr) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};
static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner)?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)):
		sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)):
		sprintf(buf, "-1\n");
}
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
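/*
 * Userspace sketch (illustrative, not part of the driver): the three
 * attributes above appear as /sys/class/net/<ifname>/tun_flags, owner and
 * group once the netdev exists.  A minimal reader, assuming a device named
 * "tun0" is already up:
 */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/class/net/tun0/tun_flags", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("tun0 flags: %s", line);	/* e.g. "0x1002" */
	fclose(f);
	return 0;
}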
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queue has already been attached, no need
			 * to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			      (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;
		err = dev_get_valid_name(net, dev, name);
		if (err < 0)
			goto err_free_dev;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			      (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
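/*
 * Userspace sketch (illustrative, not part of the driver): the canonical
 * way to reach tun_set_iff() is the TUNSETIFF ioctl on /dev/net/tun, as in
 * Documentation/networking/tuntap.txt.  Passing a "tun%d"-style name (or an
 * empty one) lets the kernel pick the unit number:
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int tun_alloc(char *dev)	/* dev: IFNAMSIZ buffer, in/out */
{
	struct ifreq ifr;
	int fd;

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no proto header */
	if (*dev)
		strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	strcpy(dev, ifr.ifr_name);	/* kernel copies the final name back */
	return fd;			/* read()/write() now carry packets */
}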
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
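/*
 * Userspace sketch (illustrative, not part of the driver): set_offload() is
 * reached via the TUNSETOFFLOAD ioctl, whose argument is the TUN_F_* bitmask
 * passed by value.  Unknown bits fail with -EINVAL, which is exactly how
 * userspace probes for features ("fd" is an attached tap fd; needs
 * <sys/ioctl.h> and <linux/if_tun.h>):
 */
static int example_enable_offloads(int fd)
{
	unsigned int offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	if (ioctl(fd, TUNSETOFFLOAD, offload) < 0)
		/* kernel rejected one of the bits; fall back to csum only */
		return ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM);
	return 0;
}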
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}
static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			/* roll back the queues filtered so far */
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}
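/*
 * Userspace sketch (illustrative, not part of the driver): TUNATTACHFILTER
 * passes a classic BPF program (struct sock_fprog from <linux/filter.h>)
 * that tun_attach_filter() above applies to every queue.  A trivial
 * drop-everything filter on an attached tap fd:
 */
static int example_attach_drop_filter(int fd)
{
	struct sock_filter drop_all[] = {
		BPF_STMT(BPF_RET | BPF_K, 0),	/* accept 0 bytes: drop */
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= drop_all,
	};

	/* remove later with ioctl(fd, TUNDETACHFILTER, &fprog) */
	return ioctl(fd, TUNATTACHFILTER, &fprog);
}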
static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}
static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}
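/*
 * Userspace sketch (illustrative, not part of the driver): with a device
 * created with IFF_MULTI_QUEUE, each fd is one queue, and tun_set_queue()
 * above lets a queue be parked and re-enabled without closing the fd:
 */
static int example_toggle_queue(int fd, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}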
static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
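/*
 * Userspace sketch (illustrative, not part of the driver): TUNSETSTEERINGEBPF
 * and TUNSETFILTEREBPF both take a pointer to an int holding a bpf(2) program
 * fd of type BPF_PROG_TYPE_SOCKET_FILTER; an fd of -1 detaches the current
 * program.  "prog_fd" is assumed to come from a prior BPF_PROG_LOAD:
 */
static int example_set_steering(int fd, int prog_fd)
{
	return ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);
}

static int example_clear_steering(int fd)
{
	int none = -1;

	return ioctl(fd, TUNSETSTEERINGEBPF, &none);
}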
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user* argp = (void __user*)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user*)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module being unprobed.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
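/*
 * Userspace sketch (illustrative, not part of the driver): the ioctl path
 * behind "ip tuntap add ... user <uid>".  Run once with CAP_NET_ADMIN after
 * TUNSETIFF; the interface then survives close(fd), and the owner can later
 * reattach to it without privileges:
 */
static int example_make_persistent(int fd, unsigned int uid)
{
	if (ioctl(fd, TUNSETPERSIST, 1) < 0)
		return -1;
	return ioctl(fd, TUNSETOWNER, uid);
}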
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
}
#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}
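/*
 * Userspace sketch (illustrative, not part of the driver): tun_chr_fasync()
 * is reached through the usual O_ASYNC setup, after which the process gets
 * SIGIO whenever the tun fd becomes ready (needs <fcntl.h>, <signal.h> and
 * <unistd.h>; "on_sigio" is an assumed signal handler):
 */
static int example_enable_sigio(int fd, void (*on_sigio)(int))
{
	signal(SIGIO, on_sigio);
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		return -1;
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}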
static int tun_chr_open(struct inode *inode, struct file * file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}
#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
/* ethtool interface */

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
};
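/*
 * Userspace sketch (illustrative, not part of the driver): rx batching can
 * be tuned with "ethtool -C tun0 rx-frames 32", or directly through the
 * SIOCETHTOOL ioctl that lands in tun_set_coalesce() above ("sock_fd" is any
 * AF_INET socket; needs <linux/ethtool.h> and <linux/sockios.h>):
 */
static int example_set_rx_batching(int sock_fd)
{
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_max_coalesced_frames = 32,	/* clamped to NAPI_POLL_WEIGHT */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;
	return ioctl(sock_fd, SIOCETHTOOL, &ifr);
}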
static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}
static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};
static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}
static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
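/*
 * In-kernel sketch (illustrative, not part of this file): this export is
 * what vhost-net builds on.  Given a tun fd handed over from userspace, a
 * module can fetch the socket and move packets through the proto_ops above;
 * the caller must keep the file reference alive while the socket is in use:
 */
static struct socket *example_get_tun_sock(int tun_fd, struct file **filp)
{
	struct file *file = fget(tun_fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (IS_ERR(sock)) {
		fput(file);
		return sock;
	}
	*filp = file;	/* fput() it when done with the socket */
	return sock;	/* usable with sock_sendmsg()/sock_recvmsg() */
}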
struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");