// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;    /* Number of addrs. Zero means disabled */
	u32		mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};
/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)
struct tun_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	struct u64_stats_sync syncp;

	/* u32 drop/error counters, summed in tun_net_get_stats64() below */
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 * tap_filter are kept in tun_struct since they are used for filtering for the
 * netdevice, not for a specific queue (at least I didn't see the requirement
 * for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};
#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
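/*
 * Illustrative note (not part of the driver): the flow table lookup in
 * tun_hashfn() below relies on TUN_NUM_FLOW_ENTRIES being a power of two,
 * so masking with (TUN_NUM_FLOW_ENTRIES - 1) is equivalent to, and cheaper
 * than, a modulo:
 *
 *	u32 rxhash = 0x12345678;
 *	u32 bucket = rxhash & TUN_MASK_FLOW_ENTRIES;  // 0x278, i.e. 632
 */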
struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};
/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	spinlock_t		lock;
	struct hlist_head	flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list	flow_gc_timer;
	unsigned long		ageing_time;
	unsigned int		numdisabled;
	struct list_head	disabled;
	void			*security;
	u32			flow_count;
	u32			rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};
bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
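/*
 * Illustrative sketch (not driver code; assumes TUN_XDP_FLAG is the low
 * pointer bit, as the mask operations above imply, and that ring entries
 * are at least 2-byte aligned so that bit is free). The tx ring can then
 * carry both sk_buffs and XDP frames:
 *
 *	struct xdp_frame *xdpf = ...;
 *	void *ptr = tun_xdp_to_ptr(xdpf);	// tag: sets the low bit
 *	if (tun_is_xdp_frame(ptr))		// true for tagged entries
 *		xdpf = tun_ptr_to_xdp(ptr);	// untag: original pointer
 */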
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	/* splice any leftovers back to the head so ordering is preserved */
	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}
static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}
static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */
static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
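/*
 * Illustrative example (sketch, not driver code): every __virtio16 field
 * in a vnet header coming from or going to userspace must pass through
 * the helpers above, e.g.:
 *
 *	struct virtio_net_hdr gso;
 *	u16 hdr_len = tun16_to_cpu(tun, gso.hdr_len);	// to native endian
 *	gso.hdr_len = cpu_to_tun16(tun, hdr_len + 2);	// back to wire format
 */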
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}
static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}
/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow came from. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq, numqueues;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}
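/*
 * Illustrative arithmetic (not driver code): the multiply-and-shift above
 * maps a 32-bit hash uniformly onto [0, numqueues) without a divide.
 * For example, with numqueues == 4 and txq == 0x80000000 (the middle of
 * the 32-bit range):
 *
 *	((u64)0x80000000 * 4) >> 32 == 2
 *
 * i.e. the hash lands in the middle of the queue range, as expected.
 */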
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}
static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);
static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}
static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}
static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
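/*
 * Illustrative note (not driver code): ether_crc() yields a 32-bit CRC;
 * shifting right by 26 keeps its top 6 bits, a value n in [0, 63].
 * mask[n >> 5] selects one of the two u32 words and (1 << (n & 31)) the
 * bit inside it, so the two helpers implement a 64-bit hash filter in the
 * style of classic Ethernet multicast filters, e.g. for n == 37:
 *
 *	mask[37 >> 5] |= 1 << (37 & 31);	// mask[1] |= 1 << 5
 */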
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err = 0, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}
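/*
 * Usage note (sketch, not driver code): userspace drives update_filter()
 * via the TUNSETTXFILTER ioctl, passing a struct tun_filter immediately
 * followed by the address array. The first FLT_EXACT_COUNT (8) addresses
 * become exact matches; any further addresses must be multicast and are
 * folded into the 64-bit hash mask above, otherwise the filter is left
 * disabled.
 */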
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}
/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (filter->count)
		return run_filter(filter, skb);

	return 1;
}
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);

	return 0;
}
/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}
static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}
static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= u64_stats_read(&p->rx_packets);
			rxbytes		= u64_stats_read(&p->rx_bytes);
			txpackets	= u64_stats_read(&p->tx_packets);
			txbytes		= u64_stats_read(&p->tx_bytes);
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->rx_frame_errors	= rx_frame_errors;
	stats->tx_dropped	= tx_dropped;
}
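/*
 * Illustrative note (not driver code): the fetch_begin/fetch_retry loop
 * above is the standard u64_stats seqcount pattern. On 32-bit SMP kernels
 * a u64 counter cannot be read atomically, so the snapshot is retried
 * whenever a writer ran u64_stats_update_begin/end in between; on 64-bit
 * kernels the helpers compile down to plain loads with no retry.
 */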
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}
static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};
static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer can
		 * tell an XDP buffer from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_change_carrier	= tun_net_change_carrier,
};
static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		dev->header_ops = &ip_tunnel_header_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}
/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}
*tun_napi_alloc_frags(struct tun_file
*tfile
,
1414 const struct iov_iter
*it
)
1416 struct sk_buff
*skb
;
1421 if (it
->nr_segs
> MAX_SKB_FRAGS
+ 1)
1422 return ERR_PTR(-ENOMEM
);
1425 skb
= napi_get_frags(&tfile
->napi
);
1428 return ERR_PTR(-ENOMEM
);
1430 linear
= iov_iter_single_seg_count(it
);
1431 err
= __skb_grow(skb
, linear
);
1436 skb
->data_len
= len
- linear
;
1437 skb
->truesize
+= skb
->data_len
;
1439 for (i
= 1; i
< it
->nr_segs
; i
++) {
1440 size_t fragsz
= it
->iov
[i
].iov_len
;
1444 if (fragsz
== 0 || fragsz
> PAGE_SIZE
) {
1448 frag
= netdev_alloc_frag(fragsz
);
1453 page
= virt_to_head_page(frag
);
1454 skb_fill_page_desc(skb
, i
- 1, page
,
1455 frag
- page_address(page
), fragsz
);
1460 /* frees skb and all frags allocated with napi_alloc_frag() */
1461 napi_free_frags(&tfile
->napi
);
1462 return ERR_PTR(err
);
/* prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}
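/*
 * Illustrative note (not driver code): with rx_batched == N, up to N skbs
 * are parked on sk_write_queue and delivered in one burst, so the queue
 * lock and bottom-half toggling are paid once per batch rather than once
 * per packet. A hypothetical trace with rx_batched == 4 and MSG_MORE set:
 *
 *	tun_rx_batched(skb1, more=1)	-> queued
 *	tun_rx_batched(skb2, more=1)	-> queued
 *	tun_rx_batched(skb3, more=1)	-> queued
 *	tun_rx_batched(skb4, more=0)	-> flush: netif_receive_skb() x4
 */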
static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}
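/*
 * Illustrative arithmetic (sketch; the exact constants vary by arch and
 * config): the size test above admits the single-page fast path only when
 * the padded payload plus struct skb_shared_info fit in one page. On a
 * 4 KiB-page system a ~1500-byte MTU frame passes the check, while a
 * 9000-byte jumbo frame fails it and falls back to the tun_alloc_skb()
 * path.
 */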
static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
				       struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err)
			return err;
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0)
			return err;
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		this_cpu_inc(tun->pcpu_stats->rx_dropped);
		break;
	}

	return act;
}

static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above, this should be rare and for simplicity
	 * we do XDP on skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;
		xdp.frame_sz = buflen;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For the packet that is not easy to be processed
		 * (e.g. gso or jumbo packet), we will do it after
		 * the skb was created with the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			err = -EFAULT;
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
drop:
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}
			return err;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}
		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
			err = -ENOMEM;
			goto drop;
		}
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				if (frags) {
					tfile->napi.skb = NULL;
					mutex_unlock(&tfile->napi_mutex);
				}
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * We may get a very small possibility of OOO during switching, not
	 * worth to optimize.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	rcu_read_lock();
	if (unlikely(!(tun->dev->flags & IFF_UP))) {
		err = -EIO;
		rcu_read_unlock();
		goto drop;
	}

	if (frags) {
		u32 headlen;

		/* Exercise flow dissector code path. */
		skb_push(skb, ETH_HLEN);
		headlen = eth_get_headlen(tun->dev, skb->data,
					  skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			napi_free_frags(&tfile->napi);
			rcu_read_unlock();
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}
	rcu_read_unlock();

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from,
			      file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return result;
}
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	struct tun_pcpu_stats *stats;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->tx_packets);
	u64_stats_add(&stats->tx_bytes, ret);
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return ret;
}
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);
			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context, */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->tx_packets);
	u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
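/*
 * Usage note (sketch, not driver code): when the reader's buffer is
 * smaller than the frame plus the optional tun_pi and vnet headers
 * accounted in "total" above, the packet is truncated and, with IFF_NO_PI
 * clear, userspace learns about it through the TUN_PKT_STRIP flag:
 *
 *	struct tun_pi *pi = (struct tun_pi *)buf;
 *	ssize_t n = read(tun_fd, buf, sizeof(buf));
 *	if (n > 0 && (pi->flags & TUN_PKT_STRIP))
 *		;	// frame did not fit and was cut short
 */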
static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->socket.wq.wait, &wait);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->socket.wq.wait, &wait);

out:
	*err = error;
	return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}
tun_chr_read_iter(struct kiocb
*iocb
, struct iov_iter
*to
)
2202 struct file
*file
= iocb
->ki_filp
;
2203 struct tun_file
*tfile
= file
->private_data
;
2204 struct tun_struct
*tun
= tun_get(tfile
);
2205 ssize_t len
= iov_iter_count(to
), ret
;
2209 ret
= tun_do_read(tun
, tfile
, to
, file
->f_flags
& O_NONBLOCK
, NULL
);
2210 ret
= min_t(ssize_t
, ret
, len
);
static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));

	free_percpu(tun->pcpu_stats);
	/* We clear pcpu_stats so that tun_set_iff() can tell if
	 * tun_free_netdev() has been called from register_netdevice().
	 */
	tun->pcpu_stats = NULL;

	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack,
		       "tun/tap creation via rtnetlink is not supported.");
	return -EOPNOTSUPP;
}
static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) + /* OWNER */
	       nla_total_size(sizeof(gid_t)) + /* GROUP */
	       nla_total_size(sizeof(u8)) + /* TYPE */
	       nla_total_size(sizeof(u8)) + /* PI */
	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
	       nla_total_size(sizeof(u8)) + /* PERSIST */
	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
	       0;
}
static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size	= tun_get_size,
	.fill_info	= tun_fill_info,
};
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
struct tun_page {
	struct page *page;
	int count;
};

static void tun_put_page(struct tun_page *tpage)
{
	if (tpage->page)
		__page_frag_cache_drain(tpage->page, tpage->count);
}
static int tun_xdp_one(struct tun_struct *tun,
		       struct tun_file *tfile,
		       struct xdp_buff *xdp, int *flush,
		       struct tun_page *tpage)
{
	unsigned int datasize = xdp->data_end - xdp->data;
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	struct tun_pcpu_stats *stats;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;
	u32 rxhash = 0, act;
	int buflen = hdr->buflen;
	int err = 0;
	bool skb_xdp = false;
	struct page *page;

	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		if (gso->gso_type) {
			skb_xdp = true;
			goto build;
		}
		xdp_set_data_meta_invalid(xdp);
		xdp->rxq = &tfile->xdp_rxq;
		xdp->frame_sz = buflen;

		act = bpf_prog_run_xdp(xdp_prog, xdp);
		err = tun_xdp_act(tun, xdp_prog, xdp, act);
		if (err < 0) {
			put_page(virt_to_head_page(xdp->data));
			return err;
		}

		switch (err) {
		case XDP_REDIRECT:
			*flush = true;
			fallthrough;
		case XDP_TX:
			return 0;
		case XDP_PASS:
			break;
		default:
			page = virt_to_head_page(xdp->data);
			if (tpage->page == page) {
				++tpage->count;
			} else {
				tun_put_page(tpage);
				tpage->page = page;
				tpage->count = 1;
			}
			return 0;
		}
	}

build:
	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		err = -EINVAL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, tun->dev);
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		err = do_xdp_generic(xdp_prog, skb);
		if (err != XDP_PASS)
			goto out;
	}

	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	netif_receive_skb(skb);

	/* No need for get_cpu_ptr() here since this function is
	 * always called with bh disabled
	 */
	stats = this_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, datasize);
	u64_stats_update_end(&stats->syncp);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

out:
	return err;
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret, i;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;

	if (!tun)
		return -EBADFD;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		struct tun_page tpage;
		int n = ctl->num;
		int flush = 0;

		memset(&tpage, 0, sizeof(tpage));

		local_bh_disable();
		rcu_read_lock();

		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
		}

		if (flush)
			xdp_do_flush();

		rcu_read_unlock();
		local_bh_enable();

		tun_put_page(&tpage);

		ret = total_len;
		goto out;
	}

	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
out:
	tun_put(tun);
	return ret;
}
static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}

static int tun_ptr_peek_len(void *ptr)
{
	if (ptr) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};
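
/* Usage sketch (not part of the driver): because these ops mimic a
 * packet socket, an in-kernel holder of the tun socket can move frames
 * with the regular socket API.  A minimal sketch, assuming "sock" came
 * from tun_get_socket() and "buf"/"len" describe a receive buffer:
 *
 *	struct msghdr msg = {};
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *	int ret;
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
 *	ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
 *
 * tun_recvmsg() then hands back the next queued frame via tun_do_read().
 */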

static struct proto tun_proto = {
	.name = "tun",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner) ?
	       sprintf(buf, "%u\n",
		       from_kuid_munged(current_user_ns(), tun->owner)) :
	       sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
	       sprintf(buf, "%u\n",
		       from_kgid_munged(current_user_ns(), tun->group)) :
	       sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};

static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queue has already been attached, no need
			 * to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
		/* free_netdev() won't check refcnt; to avoid a race with
		 * dev_put() we must publish tun only after registration.
		 */
		rcu_assign_pointer(tfile->tun, tun);
	}

	netif_carrier_on(tun->dev);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* We are here because register_netdevice() has failed.
	 * If register_netdevice() already called tun_free_netdev()
	 * while dealing with the error, tun->pcpu_stats has been cleared.
	 */
	if (!tun->pcpu_stats)
		goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
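
/* Usage sketch (not part of the driver): the userspace side of
 * TUNSETIFF, with error handling elided:
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TUN | IFF_NO_PI };
 *
 *	strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * On return ifr.ifr_name holds the allocated name (e.g. "tun0"), and
 * read()/write() on fd move raw IP packets.
 */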

static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
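
/* Usage sketch (not part of the driver): userspace enables offloads on
 * the fd with TUNSETOFFLOAD; unknown bits fail the call, which is the
 * feature-probing mechanism described above.  A minimal sketch:
 *
 *	unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, off) < 0)
 *		perror("offload bits not supported");
 */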

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS, true);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);
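
unlock:
	rtnl_unlock();
	return ret;
}

/* Usage sketch (not part of the driver): with IFF_MULTI_QUEUE each fd is
 * one queue, and TUNSETQUEUE detaches or re-attaches it at runtime.  A
 * minimal sketch:
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_DETACH_QUEUE };
 *
 *	ioctl(fd, TUNSETQUEUE, &ifr);		// stop using this queue
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);		// bring it back
 */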

static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
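
/* Usage sketch (not part of the driver): userspace passes the fd of a
 * loaded BPF_PROG_TYPE_SOCKET_FILTER program; an fd of -1 clears it.
 * A minimal sketch, assuming "load_steering_prog()" is a hypothetical
 * helper that returns a program fd:
 *
 *	int prog_fd = load_steering_prog();
 *
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);	// install
 *	prog_fd = -1;
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);	// detach
 */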

static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	unsigned int ifindex, carrier;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);

	net = dev_net(tun->dev);
	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
			   arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module from being unloaded.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		netif_info(tun, drv, tun->dev, "persist %s\n",
			   arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "owner set to %u\n",
			   from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "group set to %u\n",
			   from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			netif_info(tun, drv, tun->dev,
				   "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
				   tun->dev->type);
			ret = 0;
		}
		break;

	case TUNSETDEBUG:
		tun->msg_enable = (u32)arg;
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	case TUNSETCARRIER:
		ret = -EFAULT;
		if (copy_from_user(&carrier, argp, sizeof(carrier)))
			goto unlock;

		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
		break;

	case TUNGETDEVNETNS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto unlock;
		ret = open_related_ns(&net->ns, get_net_ns);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
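
/* Usage sketch (not part of the driver): a persistent device owned by an
 * unprivileged user is set up with the ioctls above.  A minimal sketch,
 * assuming fd was already configured with TUNSETIFF:
 *
 *	ioctl(fd, TUNSETOWNER, getuid());
 *	ioctl(fd, TUNSETGROUP, getgid());
 *	ioctl(fd, TUNSETPERSIST, 1);
 *
 * The interface now survives close(fd) and can be reopened by its owner
 * without CAP_NET_ADMIN.
 */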

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->socket.wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	return tun->msg_enable;
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->msg_enable = value;
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}
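
/* Usage note (not part of the driver): this resize runs when userspace
 * changes the queue length, e.g. "ip link set dev tun0 txqueuelen 1000";
 * the NETDEV_CHANGE_TX_QUEUE_LEN notifier below resizes every per-queue
 * ptr_ring to the new length.
 */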

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	case NETDEV_UP:
		for (i = 0; i < tun->numqueues; i++) {
			struct tun_file *tfile;

			tfile = rtnl_dereference(tun->tfiles[i]);
			tfile->socket.sk->sk_write_space(tfile->socket.sk);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
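
/* Usage sketch (not part of the driver): vhost-net resolves a tun fd
 * received from userspace into this socket.  A minimal sketch, assuming
 * "fd" refers to an attached tun file:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		fput(file);	// not a tun file, fall back
 *
 * The caller must keep "file" referenced for as long as it uses "sock".
 */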

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");