/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>
static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};
/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 * tap_filter are kept in tun_struct since they are used for filtering for the
 * netdevice, not for a specific queue (at least I didn't see a requirement for
 * that).
 *
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};
/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};
bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
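/* Illustrative sketch, not part of the original source: because xdp_frame
 * and sk_buff allocations are at least word-aligned, bit 0 of the pointer
 * is free to carry a type tag through the ptr_ring that both kinds of
 * buffers share:
 *
 *	void *tagged = tun_xdp_to_ptr(xdpf);	 // sets TUN_XDP_FLAG (bit 0)
 *	tun_is_xdp_frame(tagged);		 // -> true
 *	tun_ptr_to_xdp(tagged) == (void *)xdpf;	 // mask strips the tag again
 */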
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}
static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
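/* Usage sketch (illustrative, not from the source): on a big-endian host
 * with TUN_VNET_LE set, tun16_to_cpu(tun, gso.hdr_len) byte-swaps the
 * little-endian on-wire field into host order, and cpu_to_tun16() is the
 * exact inverse, so cpu_to_tun16(tun, tun16_to_cpu(tun, v)) == v for any
 * __virtio16 v.
 */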
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}
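/* Worked example (illustrative, not in the source): TUN_NUM_FLOW_ENTRIES
 * is a power of two, so the mask is a cheap modulo; e.g. rxhash 0x12345678
 * gives 0x12345678 & 0x3ff = 0x278, i.e. bucket 632 of the 1024 heads.
 */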
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (e->queue_index != queue_index)
			e->queue_index = queue_index;
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}
/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. the 82599) choose
 * the rxq based on the txq where the last packet of the flow was sent. As
 * the userspace application moves between processors, we may see a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}
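/* Worked arithmetic (illustrative, not in the source): the 32-bit hash is
 * treated as a fraction of 2^32, so the multiply-and-shift maps it
 * uniformly onto the queues, e.g. txq = 0xc0000000 (3/4 of the range)
 * with numqueues = 8 gives ((u64)0xc0000000 * 8) >> 32 == 6, i.e. queue 6
 * of 8, without a divide instruction on the fast path.
 */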
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);
static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}
static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}
static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
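/* Illustrative decomposition (not in the source): the top 6 CRC bits give
 * a bucket n in 0..63; n >> 5 selects one of the two u32 mask words and
 * n & 31 the bit inside it, e.g. n = 40 -> mask[1] |= 1 << 8.
 */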
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	/* MAC address filter struct, space for 8 entries */
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

free_addr:
	kfree(addr);
	return err;
}
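/* Usage sketch with assumed counts (not from the source): given
 * uf.count = 10 entries, the first 8 become exact filters and the
 * remaining 2 must be multicast addresses that land in the 64-bit hash
 * mask; a unicast address past the exact slots leaves the filter
 * disabled (count == 0), so every packet is accepted.
 */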
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}
static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
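/* Reader-side sketch (illustrative): on 32-bit SMP the syncp seqcount
 * makes the 64-bit counter reads tear-free; if a writer ran between
 * u64_stats_fetch_begin() and u64_stats_fetch_retry(), the sequence
 * number changed and the do/while loop simply re-reads the four
 * counters. On 64-bit builds both calls compile away.
 */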
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}
static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};
static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into lowest bit for consumer to differ
		 * XDP buffer from sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}
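/* Caller-side sketch (assumed caller, not from this file): a redirecting
 * driver may invoke tun_xdp_xmit(dev, n, frames, 0) repeatedly to batch
 * frames under one producer lock per call and pass XDP_XMIT_FLUSH only on
 * the final call, so the reader is woken once per batch rather than once
 * per frame.
 */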
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_change_carrier	= tun_net_change_carrier,
};
static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}
/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}
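/* Race sketch (illustrative): without the second tun_sock_writeable()
 * check, a reader could free sndbuf space between the first check and
 * test_and_set_bit(), and the EPOLLOUT wakeup from tun_sock_write_space()
 * would be lost because SOCKWQ_ASYNC_NOSPACE was not yet visible; the
 * re-check after setting the bit closes that window.
 */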
static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1)
		return ERR_PTR(-ENOMEM);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		size_t fragsz = it->iov[i].iov_len;
		struct page *page;
		char *frag;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
		frag = netdev_alloc_frag(fragsz);
		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
		page = virt_to_head_page(frag);
		skb_fill_page_desc(skb, i - 1, page,
				   frag - page_address(page), fragsz);
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}
/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}
static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}
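/* Worked check (illustrative; assumes 4 KiB pages, NET_SKB_PAD = 64,
 * NET_IP_ALIGN = 2 and an ~320-byte skb_shared_info): len = 3000 gives
 * SKB_DATA_ALIGN(3066) + SKB_DATA_ALIGN(320) = 3072 + 320 <= 4096, so the
 * single-page build_skb() fast path applies; len = 3800 overflows the page
 * and falls back to the regular tun_alloc_skb() path.
 */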
static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}
static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err)
			return err;
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0)
			return err;
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		this_cpu_inc(tun->pcpu_stats->rx_dropped);
		break;
	}

	return act;
}
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above, this should be rare and for simplicity
	 * we do XDP on skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0)
			goto err_xdp;
		if (err == XDP_REDIRECT)
			xdp_do_flush_map();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(alloc_frag, buf, buflen, len, pad);

err_xdp:
	put_page(alloc_frag->page);
out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For a packet that is not easy to process (e.g. a gso or
		 * jumbo packet), we do it after the skb is created via the
		 * generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return -EFAULT;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (!frags)
			skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * There is a small possibility of out-of-order delivery while a
	 * flow switches queues; not worth optimizing.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	if (frags) {
		/* Exercise flow dissector code path. */
		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			napi_free_frags(&tfile->napi);
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}
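/* Framing sketch (follows from the parsing above, not a separate API):
 * with IFF_VNET_HDR set and IFF_NO_PI clear, each write()/sendmsg()
 * payload is
 *   [struct tun_pi (4 bytes)][vnet_hdr_sz bytes, starting with a
 *   struct virtio_net_hdr][raw frame];
 * with IFF_NO_PI the tun_pi prefix is omitted, and for IFF_TUN devices
 * the IP version is sniffed from the first nibble of the frame.
 */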
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from,
			      file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return result;
}
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	struct tun_pcpu_stats *stats;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += ret;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return ret;
}
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);
			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context, */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
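/* Layout sketch (illustrative): for a tagged skb the VLAN tag lives only
 * in skb metadata, so userspace sees it re-inserted in-line:
 *   bytes [0, 12)   dst/src MAC (copied first, vlan_offset = 12)
 *   bytes [12, 16)  h_vlan_proto + h_vlan_TCI (the local veth struct)
 *   then the rest of the frame, copied from vlan_offset onward.
 */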
static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);
	current->state = TASK_INTERRUPTIBLE;

	while (1) {
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}
static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;

	if (!tun)
		return -EBADFD;
	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	tun_put(tun);
	return ret;
}
static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack,
		       "tun/tap creation via rtnetlink is not supported.");
	return -EOPNOTSUPP;
}
static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) + /* OWNER */
	       nla_total_size(sizeof(gid_t)) + /* GROUP */
	       nla_total_size(sizeof(u8)) + /* TYPE */
	       nla_total_size(sizeof(u8)) + /* PI */
	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
	       nla_total_size(sizeof(u8)) + /* PERSIST */
	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
	       0;
}
static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size       = tun_get_size,
	.fill_info      = tun_fill_info,
};
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
struct tun_page {
	struct page *page;
	int count;
};

static void tun_put_page(struct tun_page *tpage)
{
	if (tpage->page)
		__page_frag_cache_drain(tpage->page, tpage->count);
}
static int tun_xdp_one(struct tun_struct *tun,
		       struct tun_file *tfile,
		       struct xdp_buff *xdp, int *flush,
		       struct tun_page *tpage)
{
	unsigned int datasize = xdp->data_end - xdp->data;
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	struct tun_pcpu_stats *stats;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;
	u32 rxhash = 0, act;
	int buflen = hdr->buflen;
	int err = 0;
	bool skb_xdp = false;
	struct page *page;

	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		if (gso->gso_type) {
			skb_xdp = true;
			goto build;
		}
		xdp_set_data_meta_invalid(xdp);
		xdp->rxq = &tfile->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, xdp);
		err = tun_xdp_act(tun, xdp_prog, xdp, act);
		if (err < 0) {
			put_page(virt_to_head_page(xdp->data));
			return err;
		}

		switch (err) {
		case XDP_REDIRECT:
			*flush = true;
			/* fall through */
		case XDP_TX:
			return 0;
		case XDP_PASS:
			break;
		default:
			page = virt_to_head_page(xdp->data);
			if (tpage->page == page) {
				++tpage->count;
			} else {
				tun_put_page(tpage);
				tpage->page = page;
				tpage->count = 1;
			}
			return 0;
		}
	}

build:
	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		err = -EINVAL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, tun->dev);
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		err = do_xdp_generic(xdp_prog, skb);
		if (err != XDP_PASS)
			goto out;
	}

	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	skb_record_rx_queue(skb, tfile->queue_index);
	netif_receive_skb(skb);

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += datasize;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

out:
	return err;
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret, i;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;

	if (!tun)
		return -EBADFD;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		struct tun_page tpage;
		int n = ctl->num;
		int flush = 0;

		memset(&tpage, 0, sizeof(tpage));

		local_bh_disable();
		rcu_read_lock();

		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
		}

		if (flush)
			xdp_do_flush_map();

		rcu_read_unlock();
		local_bh_enable();

		tun_put_page(&tpage);

		ret = total_len;
		goto out;
	}

	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
out:
	tun_put(tun);
	return ret;
}
static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}
static int tun_ptr_peek_len(void *ptr)
{
	if (ptr) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};

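/*
 * The three attributes registered above surface as read-only sysfs files,
 * e.g. /sys/class/net/tun0/tun_flags, owner and group. A minimal userspace
 * reader (illustrative sketch; the interface name "tun0" is an assumption):
 */
#if 0
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/net/tun0/tun_flags", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("tun_flags: %s", buf);	/* e.g. "0x1002" */
	if (f)
		fclose(f);
	return 0;
}
#endif
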
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues have already been attached, no
			 * need to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;
		err = dev_get_valid_name(net, dev, name);
		if (err < 0)
			goto err_free_dev;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}

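/*
 * Userspace reaches tun_set_iff() through the TUNSETIFF ioctl on an open
 * /dev/net/tun fd. A minimal sketch of the canonical sequence (passing a
 * name like "tun%d" lets the kernel pick the index; error handling trimmed):
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int tun_alloc(char *dev_name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return fd;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no packet info header */
	strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;

	strcpy(dev_name, ifr.ifr_name);	/* kernel reports the final name back */
	return fd;
}
#endif
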
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}

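/*
 * set_offload() is reached via the TUNSETOFFLOAD ioctl. Hedged userspace
 * sketch: the caller ORs together the TUN_F_* bits it can accept, and an
 * -EINVAL return is the documented way to probe for flags the kernel does
 * not know about. "fd" is assumed to be a tun fd already set up with
 * TUNSETIFF and IFF_VNET_HDR.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int enable_offloads(int fd)
{
	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	/* the flag word is passed by value, not by pointer */
	return ioctl(fd, TUNSETOFFLOAD, offloads);
}
#endif
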
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

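/*
 * tun_attach_filter() is driven by the TUNATTACHFILTER ioctl, which takes a
 * classic-BPF struct sock_fprog in the same format as SO_ATTACH_FILTER.
 * Illustrative sketch, assuming "fd" is a TAP fd; the one-instruction
 * program accepts every packet and is only meant to show the plumbing:
 */
#if 0
#include <sys/ioctl.h>
#include <linux/filter.h>
#include <linux/if_tun.h>

static int attach_accept_all(int fd)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept packet */
	};
	struct sock_fprog prog = {
		.len = 1,
		.filter = insns,
	};

	return ioctl(fd, TUNATTACHFILTER, &prog);
}
#endif
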
static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}

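/*
 * For a multi-queue device (created with IFF_MULTI_QUEUE), each extra fd is
 * attached with TUNSETIFF and can later be parked and resumed per queue via
 * TUNSETQUEUE, which lands in tun_set_queue() above. Hedged sketch; the
 * helper name "pause_queue" is an assumption:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int pause_queue(int queue_fd, int detach)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = detach ? IFF_DETACH_QUEUE : IFF_ATTACH_QUEUE;
	return ioctl(queue_fd, TUNSETQUEUE, &ifr);
}
#endif
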
static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}

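/*
 * tun_set_ebpf() expects the file descriptor of an already-loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program; passing -1 clears the current
 * program. Illustrative sketch ("prog_fd" is assumed to come from a prior
 * bpf(BPF_PROG_LOAD) call):
 */
#if 0
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int set_steering_prog(int tun_fd, int prog_fd)
{
	/* prog_fd == -1 detaches the current steering program */
	return ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
}
#endif
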
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	unsigned int ifindex, carrier;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module from being unloaded.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

	case TUNSETDEBUG:
#ifdef TUN_DEBUG
		tun->debug = arg;
#endif
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	case TUNSETCARRIER:
		ret = -EFAULT;
		if (copy_from_user(&carrier, argp, sizeof(carrier)))
			goto unlock;

		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

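/*
 * tun_chr_fasync() backs O_ASYNC on the tun fd: once enabled, the driver
 * raises SIGIO toward the owning task whenever the queue becomes readable
 * or writable again (see tun_sock_write_space() above). Hedged userspace
 * sketch; the helper name "enable_sigio" is an assumption:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int enable_sigio(int tun_fd)
{
	if (fcntl(tun_fd, F_SETOWN, getpid()) < 0)
		return -1;
	/* setting O_ASYNC invokes tun_chr_fasync(); SIGIO now fires on activity */
	return fcntl(tun_fd, F_SETFL, fcntl(tun_fd, F_GETFL) | O_ASYNC);
}
#endif
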
static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

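/*
 * rx_batched is exposed through the standard coalescing knobs, so it can be
 * tuned with `ethtool -C tun0 rx-frames N` or, equivalently, through the
 * SIOCETHTOOL ioctl. Illustrative sketch; the interface name and helper
 * name are assumptions:
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rx_batching(const char *ifname, __u32 frames)
{
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_max_coalesced_frames = frames,	/* capped at NAPI_POLL_WEIGHT */
	};
	struct ifreq ifr;
	int sk, ret;

	sk = socket(AF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return sk;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;
	ret = ioctl(sk, SIOCETHTOOL, &ifr);
	close(sk);
	return ret;
}
#endif
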
static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce	= tun_get_coalesce,
	.set_coalesce	= tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

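/*
 * Sketch of the in-kernel usage pattern the comment above describes (this is
 * roughly how vhost-net consumes the export); the helper name and the
 * pre-filled msghdr are assumptions of the sketch:
 */
#if 0
static int example_kernel_send(struct file *tun_filp, struct msghdr *msg)
{
	struct socket *sock = tun_get_socket(tun_filp);

	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* works like a packet socket; caller keeps tun_filp referenced */
	return sock_sendmsg(sock, msg);
}
#endif
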
struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");