/*
 * TUN - Universal TUN/TAP device driver.
 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 * Changes:
 *
 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *   Add TUNSETLINK ioctl to set the link encapsulation
 *
 * Mark Smith <markzzzsmith@yahoo.com.au>
 *   Use eth_random_addr() for tap MAC address.
 *
 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
 *   Fixes in packet dropping, queue length setting and queue wakeup.
 *   Increased default tx queue length.
 *   Added ethtool API.
 *   Minor cleanups.
 *
 * Daniel Podlejski <underley@underley.eu.org>
 *   Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME "tun"
#define DRV_VERSION "1.6"
#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...) \
do { \
        if (tun->debug) \
                netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
        if (debug == 2) \
                printk(level fmt, ##args); \
} while (0)
#else
#define tun_debug(level, tun, fmt, args...) \
do { \
        if (0) \
                netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
        if (0) \
                printk(level fmt, ##args); \
} while (0)
#endif

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
                      IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
struct tap_filter {
        unsigned int    count;    /* Number of addrs. Zero means disabled */
        u32             mask[2];  /* Mask of the hashed addrs */
        unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) so that it can serve as one transmit queue for the tuntap
 * device. The sock_fprog and tap_filter are kept in tun_struct because they
 * filter for the netdevice as a whole, not for a specific queue.
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
        struct sock sk;
        struct socket socket;
        struct socket_wq wq;
        struct tun_struct __rcu *tun;
        struct fasync_struct *fasync;
        /* only used for fasync */
        unsigned int flags;
        union {
                u16 queue_index;
                unsigned int ifindex;
        };
        struct list_head next;
        struct tun_struct *detached;
};

struct tun_flow_entry {
        struct hlist_node hash_link;
        struct rcu_head rcu;
        struct tun_struct *tun;

        u32 rxhash;
        u32 rps_rxhash;
        int queue_index;
        unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, to preserve the behavior of persistent devices.
 */
struct tun_struct {
        struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
        unsigned int numqueues;
        unsigned int flags;
        kuid_t owner;
        kgid_t group;

        struct net_device *dev;
        netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
                           NETIF_F_TSO6|NETIF_F_UFO)

        int vnet_hdr_sz;
        int sndbuf;
        struct tap_filter txflt;
        struct sock_fprog fprog;
        /* protected by rtnl lock */
        bool filter_attached;
#ifdef TUN_DEBUG
        int debug;
#endif
        spinlock_t lock;
        struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
        struct timer_list flow_gc_timer;
        unsigned long ageing_time;
        unsigned int numdisabled;
        struct list_head disabled;
        void *security;
        u32 flow_count;
};

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
        return tun->flags & TUN_VNET_BE ? false :
                virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
        int be = !!(tun->flags & TUN_VNET_BE);

        if (put_user(be, argp))
                return -EFAULT;

        return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
        int be;

        if (get_user(be, argp))
                return -EFAULT;

        if (be)
                tun->flags |= TUN_VNET_BE;
        else
                tun->flags &= ~TUN_VNET_BE;

        return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
        return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
        return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
        return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
        return tun->flags & TUN_VNET_LE ||
                tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
        return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
        return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
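/*
 * Illustrative userspace sketch (editor's addition, not part of this
 * driver): pinning the vnet header byte order consumed by the helpers
 * above to little-endian, regardless of guest endianness.  Error
 * handling is elided.
 *
 *      int le = 1;
 *
 *      if (ioctl(fd, TUNSETVNETLE, &le) < 0)
 *              perror("TUNSETVNETLE");
 */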

static inline u32 tun_hashfn(u32 rxhash)
{
        return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
        struct tun_flow_entry *e;

        hlist_for_each_entry_rcu(e, head, hash_link) {
                if (e->rxhash == rxhash)
                        return e;
        }
        return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
                                              struct hlist_head *head,
                                              u32 rxhash, u16 queue_index)
{
        struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

        if (e) {
                tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
                          rxhash, queue_index);
                e->updated = jiffies;
                e->rxhash = rxhash;
                e->rps_rxhash = 0;
                e->queue_index = queue_index;
                e->tun = tun;
                hlist_add_head_rcu(&e->hash_link, head);
                ++tun->flow_count;
        }
        return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
        tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
                  e->rxhash, e->queue_index);
        hlist_del_rcu(&e->hash_link);
        kfree_rcu(e, rcu);
        --tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
        int i;

        spin_lock_bh(&tun->lock);
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                struct tun_flow_entry *e;
                struct hlist_node *n;

                hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
                        tun_flow_delete(tun, e);
        }
        spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
        int i;

        spin_lock_bh(&tun->lock);
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                struct tun_flow_entry *e;
                struct hlist_node *n;

                hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
                        if (e->queue_index == queue_index)
                                tun_flow_delete(tun, e);
                }
        }
        spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(unsigned long data)
{
        struct tun_struct *tun = (struct tun_struct *)data;
        unsigned long delay = tun->ageing_time;
        unsigned long next_timer = jiffies + delay;
        unsigned long count = 0;
        int i;

        tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

        spin_lock_bh(&tun->lock);
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                struct tun_flow_entry *e;
                struct hlist_node *n;

                hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
                        unsigned long this_timer;
                        count++;
                        this_timer = e->updated + delay;
                        if (time_before_eq(this_timer, jiffies))
                                tun_flow_delete(tun, e);
                        else if (time_before(this_timer, next_timer))
                                next_timer = this_timer;
                }
        }

        if (count)
                mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
        spin_unlock_bh(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
                            struct tun_file *tfile)
{
        struct hlist_head *head;
        struct tun_flow_entry *e;
        unsigned long delay = tun->ageing_time;
        u16 queue_index = tfile->queue_index;

        if (!rxhash)
                return;
        else
                head = &tun->flows[tun_hashfn(rxhash)];

        rcu_read_lock();

        /* There is a small chance of out-of-order delivery while a flow
         * switches queues; not worth optimizing. */
        if (tun->numqueues == 1 || tfile->detached)
                goto unlock;

        e = tun_flow_find(head, rxhash);
        if (likely(e)) {
                /* TODO: keep queueing to old queue until it's empty? */
                e->queue_index = queue_index;
                e->updated = jiffies;
                sock_rps_record_flow_hash(e->rps_rxhash);
        } else {
                spin_lock_bh(&tun->lock);
                if (!tun_flow_find(head, rxhash) &&
                    tun->flow_count < MAX_TAP_FLOWS)
                        tun_flow_create(tun, head, rxhash, queue_index);

                if (!timer_pending(&tun->flow_gc_timer))
                        mod_timer(&tun->flow_gc_timer,
                                  round_jiffies_up(jiffies + delay));
                spin_unlock_bh(&tun->lock);
        }

unlock:
        rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
        if (unlikely(e->rps_rxhash != hash))
                e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is that some cards (e.g. the 82599) choose
 * the rxq based on the txq where the last packet of the flow went out.
 * As the userspace application moves between processors, we may get a
 * different rxq no. here. If we could not get an rxhash, then we would
 * hope the rxq no. may help here.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
                            void *accel_priv, select_queue_fallback_t fallback)
{
        struct tun_struct *tun = netdev_priv(dev);
        struct tun_flow_entry *e;
        u32 txq = 0;
        u32 numqueues = 0;

        rcu_read_lock();
        numqueues = ACCESS_ONCE(tun->numqueues);

        txq = skb_get_hash(skb);
        if (txq) {
                e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
                if (e) {
                        tun_flow_save_rps_rxhash(e, txq);
                        txq = e->queue_index;
                } else
                        /* use multiply and shift instead of expensive divide */
                        txq = ((u64)txq * numqueues) >> 32;
        } else if (likely(skb_rx_queue_recorded(skb))) {
                txq = skb_get_rx_queue(skb);
                while (unlikely(txq >= numqueues))
                        txq -= numqueues;
        }

        rcu_read_unlock();
        return txq;
}
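/*
 * Worked example for the multiply-and-shift above (editor's
 * illustration, not from the original source): with numqueues = 4 and a
 * 32-bit hash of 0xc0000001, (0xc0000001ULL * 4) >> 32 = 3, i.e. the
 * hash is scaled from [0, 2^32) onto [0, numqueues) without a divide.
 */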

static inline bool tun_not_capable(struct tun_struct *tun)
{
        const struct cred *cred = current_cred();
        struct net *net = dev_net(tun->dev);

        return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
                (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
                !ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
        netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
        netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
        tfile->detached = tun;
        list_add_tail(&tfile->next, &tun->disabled);
        ++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
        struct tun_struct *tun = tfile->detached;

        tfile->detached = NULL;
        list_del_init(&tfile->next);
        --tun->numdisabled;
        return tun;
}

static void tun_queue_purge(struct tun_file *tfile)
{
        skb_queue_purge(&tfile->sk.sk_receive_queue);
        skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
        struct tun_file *ntfile;
        struct tun_struct *tun;

        tun = rtnl_dereference(tfile->tun);

        if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);

                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
                ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;

                --tun->numqueues;
                if (clean) {
                        RCU_INIT_POINTER(tfile->tun, NULL);
                        sock_put(&tfile->sk);
                } else
                        tun_disable_queue(tun, tfile);

                synchronize_net();
                tun_flow_delete_by_queue(tun, tun->numqueues + 1);
                /* Drop read queue */
                tun_queue_purge(tfile);
                tun_set_real_num_queues(tun);
        } else if (tfile->detached && clean) {
                tun = tun_enable_queue(tfile);
                sock_put(&tfile->sk);
        }

        if (clean) {
                if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
                        netif_carrier_off(tun->dev);

                        if (!(tun->flags & IFF_PERSIST) &&
                            tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
                }
                sock_put(&tfile->sk);
        }
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
        rtnl_lock();
        __tun_detach(tfile, clean);
        rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);
        struct tun_file *tfile, *tmp;
        int i, n = tun->numqueues;

        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
                tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
                --tun->numqueues;
        }
        list_for_each_entry(tfile, &tun->disabled, next) {
                tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
        }
        BUG_ON(tun->numqueues != 0);

        synchronize_net();
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                /* Drop read queue */
                tun_queue_purge(tfile);
                sock_put(&tfile->sk);
        }
        list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
                tun_enable_queue(tfile);
                tun_queue_purge(tfile);
                sock_put(&tfile->sk);
        }
        BUG_ON(tun->numdisabled != 0);

        if (tun->flags & IFF_PERSIST)
                module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
        struct tun_file *tfile = file->private_data;
        int err;

        err = security_tun_dev_attach(tfile->socket.sk, tun->security);
        if (err < 0)
                goto out;

        err = -EINVAL;
        if (rtnl_dereference(tfile->tun) && !tfile->detached)
                goto out;

        err = -EBUSY;
        if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
                goto out;

        err = -E2BIG;
        if (!tfile->detached &&
            tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
                goto out;

        err = 0;

        /* Re-attach the filter to the persistent device; bail out on
         * failure rather than attaching the queue with a missing filter. */
        if (!skip_filter && (tun->filter_attached == true)) {
                err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                if (err < 0)
                        goto out;
        }
        tfile->queue_index = tun->numqueues;
        rcu_assign_pointer(tfile->tun, tun);
        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
        tun->numqueues++;

        if (tfile->detached)
                tun_enable_queue(tfile);
        else
                sock_hold(&tfile->sk);

        tun_set_real_num_queues(tun);

        /* device is allowed to go away first, so no need to hold extra
         * refcnt.
         */

out:
        return err;
}

static struct tun_struct *__tun_get(struct tun_file *tfile)
{
        struct tun_struct *tun;

        rcu_read_lock();
        tun = rcu_dereference(tfile->tun);
        if (tun)
                dev_hold(tun->dev);
        rcu_read_unlock();

        return tun;
}

static struct tun_struct *tun_get(struct file *file)
{
        return __tun_get(file->private_data);
}

static void tun_put(struct tun_struct *tun)
{
        dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
        int n = ether_crc(ETH_ALEN, addr) >> 26;
        mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
        int n = ether_crc(ETH_ALEN, addr) >> 26;
        return mask[n >> 5] & (1 << (n & 31));
}
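/*
 * The two helpers above implement a 64-bit hash filter: the top six
 * bits of the Ethernet CRC select one bit out of the 64 bits in
 * mask[2].  For example, a CRC of 0xfc000000 yields n = 63, i.e. bit 31
 * of mask[1].  (Editor's illustration, not from the original source.)
 */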

static int update_filter(struct tap_filter *filter, void __user *arg)
{
        struct { u8 u[ETH_ALEN]; } *addr;
        struct tun_filter uf;
        int err, alen, n, nexact;

        if (copy_from_user(&uf, arg, sizeof(uf)))
                return -EFAULT;

        if (!uf.count) {
                /* Disabled */
                filter->count = 0;
                return 0;
        }

        alen = ETH_ALEN * uf.count;
        addr = kmalloc(alen, GFP_KERNEL);
        if (!addr)
                return -ENOMEM;

        if (copy_from_user(addr, arg + sizeof(uf), alen)) {
                err = -EFAULT;
                goto done;
        }

        /* The filter is updated without holding any locks, which is
         * perfectly safe: we disable it first, so in the worst case we
         * accept a few undesired packets. */
        filter->count = 0;
        wmb();

        /* Use the first set of addresses as an exact filter */
        for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
                memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

        nexact = n;

        /* Remaining multicast addresses are hashed;
         * a unicast address here leaves the filter disabled. */
        memset(filter->mask, 0, sizeof(filter->mask));
        for (; n < uf.count; n++) {
                if (!is_multicast_ether_addr(addr[n].u)) {
                        err = 0; /* no filter */
                        goto done;
                }
                addr_hash_set(filter->mask, addr[n].u);
        }

        /* For ALLMULTI just set the mask to all ones.
         * This overrides the mask populated above. */
        if ((uf.flags & TUN_FLT_ALLMULTI))
                memset(filter->mask, ~0, sizeof(filter->mask));

        /* Now enable the filter */
        wmb();
        filter->count = nexact;

        /* Return the number of exact filters */
        err = nexact;

done:
        kfree(addr);
        return err;
}
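/*
 * Illustrative userspace sketch (editor's addition, not part of this
 * driver): installing a one-address exact filter with TUNSETTXFILTER,
 * matching the layout update_filter() expects (struct tun_filter
 * followed by the addresses).  Error handling is elided and the MAC
 * address is an arbitrary example.
 *
 *      struct {
 *              struct tun_filter uf;
 *              unsigned char addr[1][ETH_ALEN];
 *      } f = {
 *              .uf = { .flags = 0, .count = 1 },
 *              .addr = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
 *      };
 *
 *      if (ioctl(fd, TUNSETTXFILTER, &f) < 0)
 *              perror("TUNSETTXFILTER");
 */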

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
        /* Cannot use eth_hdr(skb) here because the mac header has not
         * been set up at this point. */
        struct ethhdr *eh = (struct ethhdr *) skb->data;
        int i;

        /* Exact match */
        for (i = 0; i < filter->count; i++)
                if (ether_addr_equal(eh->h_dest, filter->addr[i]))
                        return 1;

        /* Inexact match (multicast only) */
        if (is_multicast_ether_addr(eh->h_dest))
                return addr_hash_test(filter->mask, eh->h_dest);

        return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
        if (!filter->count)
                return 1;

        return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
        tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
        netif_tx_start_all_queues(dev);
        return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        return 0;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);
        int txq = skb->queue_mapping;
        struct tun_file *tfile;
        u32 numqueues = 0;

        rcu_read_lock();
        tfile = rcu_dereference(tun->tfiles[txq]);
        numqueues = ACCESS_ONCE(tun->numqueues);

        /* Drop packet if interface is not attached */
        if (txq >= numqueues)
                goto drop;

        if (numqueues == 1) {
                /* Select queue was not called for the skbuff, so we extract the
                 * RPS hash and save it into the flow_table here.
                 */
                __u32 rxhash;

                rxhash = skb_get_hash(skb);
                if (rxhash) {
                        struct tun_flow_entry *e;
                        e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
                                          rxhash);
                        if (e)
                                tun_flow_save_rps_rxhash(e, rxhash);
                }
        }

        tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

        BUG_ON(!tfile);

        /* Drop it if the filter does not like it.
         * This is a noop if the filter is disabled.
         * The filter can be enabled only for TAP devices. */
        if (!check_filter(&tun->txflt, skb))
                goto drop;

        if (tfile->socket.sk->sk_filter &&
            sk_filter(tfile->socket.sk, skb))
                goto drop;

        /* Limit the number of packets queued by dividing the txq length by
         * the number of queues.
         */
        if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
            >= dev->tx_queue_len)
                goto drop;

        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
                goto drop;

        if (skb->sk && sk_fullsock(skb->sk)) {
                sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
                sw_tx_timestamp(skb);
        }

        /* Orphan the skb - required because we might hang on to it
         * for an indefinite time.
         */
        skb_orphan(skb);

        nf_reset(skb);

        /* Enqueue packet */
        skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
                kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
        tfile->socket.sk->sk_data_ready(tfile->socket.sk);

        rcu_read_unlock();
        return NETDEV_TX_OK;

drop:
        dev->stats.tx_dropped++;
        skb_tx_error(skb);
        kfree_skb(skb);
        rcu_read_unlock();
        return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
        /*
         * This callback is supposed to deal with mc filter in
         * _rx_ path and has nothing to do with the _tx_ path.
         * In rx path we always accept everything userspace gives us.
         */
}

#define MIN_MTU 68
#define MAX_MTU 65535

static int
tun_net_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
                                              netdev_features_t features)
{
        struct tun_struct *tun = netdev_priv(dev);

        return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
        /*
         * Tun only receives frames when:
         * 1) the char device endpoint gets data from user space
         * 2) the tun socket gets a sendmsg call from user space
         * Since both of those are synchronous operations, we are guaranteed
         * never to have pending data when we poll for it, so there is
         * nothing to do here but return.
         * We need this though so netpoll recognizes us as an interface that
         * supports polling, which enables bridge devices in virt setups to
         * still use netconsole.
         */
        return;
}
#endif
static const struct net_device_ops tun_netdev_ops = {
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
        .ndo_start_xmit         = tun_net_xmit,
        .ndo_change_mtu         = tun_net_change_mtu,
        .ndo_fix_features       = tun_net_fix_features,
        .ndo_select_queue       = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tun_poll_controller,
#endif
};

static const struct net_device_ops tap_netdev_ops = {
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
        .ndo_start_xmit         = tun_net_xmit,
        .ndo_change_mtu         = tun_net_change_mtu,
        .ndo_fix_features       = tun_net_fix_features,
        .ndo_set_rx_mode        = tun_net_mclist,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_select_queue       = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tun_poll_controller,
#endif
        .ndo_features_check     = passthru_features_check,
};

static void tun_flow_init(struct tun_struct *tun)
{
        int i;

        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
                INIT_HLIST_HEAD(&tun->flows[i]);

        tun->ageing_time = TUN_FLOW_EXPIRE;
        setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
        mod_timer(&tun->flow_gc_timer,
                  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
        del_timer_sync(&tun->flow_gc_timer);
        tun_flow_flush(tun);
}

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);

        switch (tun->flags & TUN_TYPE_MASK) {
        case IFF_TUN:
                dev->netdev_ops = &tun_netdev_ops;

                /* Point-to-Point TUN Device */
                dev->hard_header_len = 0;
                dev->addr_len = 0;
                dev->mtu = 1500;

                /* Zero header length */
                dev->type = ARPHRD_NONE;
                dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
                dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
                break;

        case IFF_TAP:
                dev->netdev_ops = &tap_netdev_ops;
                /* Ethernet TAP Device */
                ether_setup(dev);
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

                eth_hw_addr_random(dev);

                dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
                break;
        }
}

/* Character device part */

/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = __tun_get(tfile);
        struct sock *sk;
        unsigned int mask = 0;

        if (!tun)
                return POLLERR;

        sk = tfile->socket.sk;

        tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

        poll_wait(file, sk_sleep(sk), wait);

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(sk) ||
            (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
             sock_writeable(sk)))
                mask |= POLLOUT | POLLWRNORM;

        if (tun->dev->reg_state != NETREG_REGISTERED)
                mask = POLLERR;

        tun_put(tun);
        return mask;
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
                                     size_t prepad, size_t len,
                                     size_t linear, int noblock)
{
        struct sock *sk = tfile->socket.sk;
        struct sk_buff *skb;
        int err;

        /* Under a page?  Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   &err, 0);
        if (!skb)
                return ERR_PTR(err);

        skb_reserve(skb, prepad);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

        return skb;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                            void *msg_control, struct iov_iter *from,
                            int noblock)
{
        struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
        struct sk_buff *skb;
        size_t total_len = iov_iter_count(from);
        size_t len = total_len, align = NET_SKB_PAD, linear;
        struct virtio_net_hdr gso = { 0 };
        int good_linear;
        int copylen;
        bool zerocopy = false;
        int err;
        u32 rxhash;
        ssize_t n;

        if (!(tun->flags & IFF_NO_PI)) {
                if (len < sizeof(pi))
                        return -EINVAL;
                len -= sizeof(pi);

                n = copy_from_iter(&pi, sizeof(pi), from);
                if (n != sizeof(pi))
                        return -EFAULT;
        }

        if (tun->flags & IFF_VNET_HDR) {
                if (len < tun->vnet_hdr_sz)
                        return -EINVAL;
                len -= tun->vnet_hdr_sz;

                n = copy_from_iter(&gso, sizeof(gso), from);
                if (n != sizeof(gso))
                        return -EFAULT;

                if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
                        gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

                if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
                iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
        }

        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
                align += NET_IP_ALIGN;
                if (unlikely(len < ETH_HLEN ||
                             (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
                        return -EINVAL;
        }

        good_linear = SKB_MAX_HEAD(align);

        if (msg_control) {
                struct iov_iter i = *from;

                /* There are 256 bytes to be copied in skb, so there is
                 * enough room for skb expand head in case it is used.
                 * The rest of the buffer is mapped from userspace.
                 */
                copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
                if (copylen > good_linear)
                        copylen = good_linear;
                linear = copylen;
                iov_iter_advance(&i, copylen);
                if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
                        zerocopy = true;
        }

        if (!zerocopy) {
                copylen = len;
                if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
                        linear = good_linear;
                else
                        linear = tun16_to_cpu(tun, gso.hdr_len);
        }

        skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
        if (IS_ERR(skb)) {
                if (PTR_ERR(skb) != -EAGAIN)
                        tun->dev->stats.rx_dropped++;
                return PTR_ERR(skb);
        }

        if (zerocopy)
                err = zerocopy_sg_from_iter(skb, from);
        else {
                err = skb_copy_datagram_from_iter(skb, 0, from, len);
                if (!err && msg_control) {
                        struct ubuf_info *uarg = msg_control;
                        uarg->callback(uarg, false);
                }
        }

        if (err) {
                tun->dev->stats.rx_dropped++;
                kfree_skb(skb);
                return -EFAULT;
        }

        if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
                                          tun16_to_cpu(tun, gso.csum_offset))) {
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
                        return -EINVAL;
                }
        }

        switch (tun->flags & TUN_TYPE_MASK) {
        case IFF_TUN:
                if (tun->flags & IFF_NO_PI) {
                        switch (skb->data[0] & 0xf0) {
                        case 0x40:
                                pi.proto = htons(ETH_P_IP);
                                break;
                        case 0x60:
                                pi.proto = htons(ETH_P_IPV6);
                                break;
                        default:
                                tun->dev->stats.rx_dropped++;
                                kfree_skb(skb);
                                return -EINVAL;
                        }
                }

                skb_reset_mac_header(skb);
                skb->protocol = pi.proto;
                skb->dev = tun->dev;
                break;
        case IFF_TAP:
                skb->protocol = eth_type_trans(skb, tun->dev);
                break;
        }

        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                default:
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
                        return -EINVAL;
                }

                if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
                if (skb_shinfo(skb)->gso_size == 0) {
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
                        return -EINVAL;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }

        skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);

        rxhash = skb_get_hash(skb);
        netif_rx_ni(skb);

        tun->dev->stats.rx_packets++;
        tun->dev->stats.rx_bytes += len;

        tun_flow_update(tun, rxhash, tfile);
        return total_len;
}
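/*
 * Layout of a single write() on the tun fd, as parsed by tun_get_user()
 * above (each field is present only when the corresponding flag is set):
 *
 *      [struct tun_pi]           unless IFF_NO_PI
 *      [struct virtio_net_hdr]   if IFF_VNET_HDR (padded to vnet_hdr_sz)
 *      [packet payload]
 *
 * (Editor's summary of the code above, not from the original source.)
 */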

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct tun_struct *tun = tun_get(file);
        struct tun_file *tfile = file->private_data;
        ssize_t result;

        if (!tun)
                return -EBADFD;

        result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);

        tun_put(tun);
        return result;
}

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
                            struct tun_file *tfile,
                            struct sk_buff *skb,
                            struct iov_iter *iter)
{
        struct tun_pi pi = { 0, skb->protocol };
        ssize_t total;
        int vlan_offset = 0;
        int vlan_hlen = 0;
        int vnet_hdr_sz = 0;

        if (skb_vlan_tag_present(skb))
                vlan_hlen = VLAN_HLEN;

        if (tun->flags & IFF_VNET_HDR)
                vnet_hdr_sz = tun->vnet_hdr_sz;

        total = skb->len + vlan_hlen + vnet_hdr_sz;

        if (!(tun->flags & IFF_NO_PI)) {
                if (iov_iter_count(iter) < sizeof(pi))
                        return -EINVAL;

                total += sizeof(pi);
                if (iov_iter_count(iter) < total) {
                        /* Packet will be stripped */
                        pi.flags |= TUN_PKT_STRIP;
                }

                if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
                        return -EFAULT;
        }

        if (vnet_hdr_sz) {
                struct virtio_net_hdr gso = { 0 }; /* no info leak */
                if (iov_iter_count(iter) < vnet_hdr_sz)
                        return -EINVAL;

                if (skb_is_gso(skb)) {
                        struct skb_shared_info *sinfo = skb_shinfo(skb);

                        /* This is a hint as to how much should be linear. */
                        gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
                        gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
                        if (sinfo->gso_type & SKB_GSO_TCPV4)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                        else if (sinfo->gso_type & SKB_GSO_UDP)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
                                       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
                                       tun16_to_cpu(tun, gso.hdr_len));
                                print_hex_dump(KERN_ERR, "tun: ",
                                               DUMP_PREFIX_NONE,
                                               16, 1, skb->head,
                                               min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
                                WARN_ON_ONCE(1);
                                return -EINVAL;
                        }
                        if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                                gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
                } else
                        gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                        gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
                                                      vlan_hlen);
                        gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
                } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                        gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
                } /* else everything is zero */

                if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
                        return -EFAULT;

                iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
        }

        if (vlan_hlen) {
                int ret;
                struct {
                        __be16 h_vlan_proto;
                        __be16 h_vlan_TCI;
                } veth;

                veth.h_vlan_proto = skb->vlan_proto;
                veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

                ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
                if (ret || !iov_iter_count(iter))
                        goto done;

                ret = copy_to_iter(&veth, sizeof(veth), iter);
                if (ret != sizeof(veth) || !iov_iter_count(iter))
                        goto done;
        }

        skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
        tun->dev->stats.tx_packets++;
        tun->dev->stats.tx_bytes += skb->len + vlan_hlen;

        return total;
}

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                           struct iov_iter *to,
                           int noblock)
{
        struct sk_buff *skb;
        ssize_t ret;
        int peeked, err, off = 0;

        tun_debug(KERN_INFO, tun, "tun_do_read\n");

        if (!iov_iter_count(to))
                return 0;

        if (tun->dev->reg_state != NETREG_REGISTERED)
                return -EIO;

        /* Read frames from queue */
        skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
                                  &peeked, &off, &err);
        if (!skb)
                return err;

        ret = tun_put_user(tun, tfile, skb, to);
        if (unlikely(ret < 0))
                kfree_skb(skb);
        else
                consume_skb(skb);

        return ret;
}

static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = __tun_get(tfile);
        ssize_t len = iov_iter_count(to), ret;

        if (!tun)
                return -EBADFD;
        ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
        tun_put(tun);
        return ret;
}

static void tun_free_netdev(struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);

        BUG_ON(!(list_empty(&tun->disabled)));
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
        free_netdev(dev);
}

static void tun_setup(struct net_device *dev)
{
        struct tun_struct *tun = netdev_priv(dev);

        tun->owner = INVALID_UID;
        tun->group = INVALID_GID;

        dev->ethtool_ops = &tun_ethtool_ops;
        dev->destructor = tun_free_netdev;
}

/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
        return -EINVAL;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct tun_struct),
        .setup          = tun_setup,
        .validate       = tun_validate,
};

static void tun_sock_write_space(struct sock *sk)
{
        struct tun_file *tfile;
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk))
                return;

        if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;

        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_sync_poll(wqueue, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);

        tfile = container_of(sk, struct tun_file, sk);
        kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}

static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        int ret;
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = __tun_get(tfile);

        if (!tun)
                return -EBADFD;

        ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
                           m->msg_flags & MSG_DONTWAIT);
        tun_put(tun);
        return ret;
}

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
                       int flags)
{
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = __tun_get(tfile);
        int ret;

        if (!tun)
                return -EBADFD;

        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
                goto out;
        }
        if (flags & MSG_ERRQUEUE) {
                ret = sock_recv_errqueue(sock->sk, m, total_len,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
        ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
        if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
out:
        tun_put(tun);
        return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
        .sendmsg = tun_sendmsg,
        .recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
        .name           = "tun",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
        return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
        return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
        return uid_valid(tun->owner) ?
                sprintf(buf, "%u\n",
                        from_kuid_munged(current_user_ns(), tun->owner)) :
                sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
        return gid_valid(tun->group) ?
                sprintf(buf, "%u\n",
                        from_kgid_munged(current_user_ns(), tun->group)) :
                sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
        &dev_attr_tun_flags.attr,
        &dev_attr_owner.attr,
        &dev_attr_group.attr,
        NULL
};

static const struct attribute_group tun_attr_group = {
        .attrs = tun_dev_attrs
};

static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues have already been attached, no
			 * need to initialize the device again.
			 */
			return 0;
		}
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_dev;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	tun->flags = (tun->flags & ~TUN_FEATURES) |
		     (ifr->ifr_flags & TUN_FEATURES);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_dev:
	free_netdev(dev);
	return err;
}
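
/*
 * Illustrative user-space sketch (not part of the driver): the canonical
 * TUNSETIFF call that lands in tun_set_iff() above, following
 * Documentation/networking/tuntap.txt.  Passing an empty name (or a
 * pattern like "tun%d") lets the kernel pick the index; the chosen name
 * is copied back into ifr_name.  "dev" is assumed to be an
 * IFNAMSIZ-sized buffer.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	static int tun_alloc(char *dev)
 *	{
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return fd;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *		if (*dev)
 *			strncpy(ifr.ifr_name, dev, IFNAMSIZ);
 *
 *		err = ioctl(fd, TUNSETIFF, (void *)&ifr);
 *		if (err < 0) {
 *			close(fd);
 *			return err;
 *		}
 *		strcpy(dev, ifr.ifr_name);
 *		return fd;
 *	}
 */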

static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		if (arg & TUN_F_UFO) {
			features |= NETIF_F_UFO;
			arg &= ~TUN_F_UFO;
		}
	}

	/* This gives the user a way to test for new features in the future
	 * by trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	netdev_update_features(tun->dev);

	return 0;
}
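
/*
 * Illustrative user-space sketch (not part of the driver): because each
 * recognised bit is stripped above and any remainder is rejected with
 * -EINVAL, a caller can probe for support by attempting the full set and
 * falling back to checksum offload alone.  Flag values come from
 * <linux/if_tun.h>; "fd" is assumed to be an attached tun/tap fd.  Note
 * the TSO bits are only honoured when TUN_F_CSUM is also set.
 *
 *	unsigned int offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
 *			       TUN_F_TSO_ECN;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, offload) != 0)
 *		ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM);
 */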

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		sk_detach_filter(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (ret) {
			/* Roll back the queues already attached to. */
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}
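
/*
 * Illustrative user-space sketch (not part of the driver): attaching a
 * classic BPF filter to every queue of a tap device via TUNATTACHFILTER
 * (handled in the ioctl switch below; TAP only).  struct sock_filter and
 * struct sock_fprog are from <linux/filter.h>.  This one-instruction
 * program (BPF_RET | BPF_K, 0xffff) accepts every packet and is purely a
 * placeholder for a real filter.
 *
 *	struct sock_filter accept_all[] = {
 *		{ 0x06, 0, 0, 0x0000ffff },
 *	};
 *	struct sock_fprog fprog = {
 *		.len = 1,
 *		.filter = accept_all,
 *	};
 *
 *	if (ioctl(tap_fd, TUNATTACHFILTER, &fprog) != 0)
 *		perror("TUNATTACHFILTER");
 */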

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else {
		ret = -EINVAL;
	}

unlock:
	rtnl_unlock();
	return ret;
}
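
/*
 * Illustrative user-space sketch (not part of the driver): temporarily
 * detaching and re-attaching one queue of a multiqueue device.
 * "queue_fd" is assumed to be a fd that was attached with
 * IFF_MULTI_QUEUE at TUNSETIFF time; only the queue flags in ifr_flags
 * are consulted here.
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	if (ioctl(queue_fd, TUNSETQUEUE, &ifr) != 0)
 *		perror("detach queue");
 *
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	if (ioctl(queue_fd, TUNSETQUEUE, &ifr) != 0)
 *		perror("attach queue");
 */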

static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int le;
	int ret;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE)
		return tun_set_queue(file, &ifr);

	ret = 0;
	rtnl_lock();

	tun = __tun_get(tfile);
	if (cmd == TUNSETIFF && !tun) {
		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Take an extra reference to
		 * the module to prevent it from being unloaded while a
		 * persistent device exists.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int)arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		/* Can be read only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
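
/*
 * Illustrative user-space sketch (not part of the driver): a few of the
 * scalar ioctls handled above.  TUNSETPERSIST makes the device outlive
 * this fd (as "ip tuntap add" does); TUNSETOWNER restricts who may
 * re-attach; TUNSETVNETHDRSZ grows the virtio-net header, e.g. to the
 * size of struct virtio_net_hdr_mrg_rxbuf (<linux/virtio_net.h>) for
 * mergeable rx buffers.  "fd" is assumed to be an attached tun/tap fd.
 *
 *	ioctl(fd, TUNSETPERSIST, 1);
 *	ioctl(fd, TUNSETOWNER, getuid());
 *
 *	int hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
 */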

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		tfile->flags |= TUN_FASYNC;
	} else {
		tfile->flags &= ~TUN_FASYNC;
	}
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(f);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
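
/*
 * The fdinfo hook above means /proc/<pid>/fdinfo/<fd> for an attached
 * tun fd gains a line of the form "iff:\ttun0" (illustrative device
 * name), which lets tools map an open fd back to its interface.
 */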

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* There is no real link; report fixed nominal settings. */
	cmd->supported		= 0;
	cmd->advertising	= 0;
	ethtool_cmd_speed_set(cmd, SPEED_10);
	cmd->duplex		= DUPLEX_FULL;
	cmd->port		= PORT_TP;
	cmd->phy_address	= 0;
	cmd->transceiver	= XCVR_INTERNAL;
	cmd->autoneg		= AUTONEG_DISABLE;
	cmd->maxtxpkt		= 0;
	cmd->maxrxpkt		= 0;
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_settings	= tun_get_settings,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	pr_info("%s\n", DRV_COPYRIGHT);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}
	return 0;
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
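
/*
 * Illustrative in-kernel sketch (not part of this driver): a consumer
 * such as vhost-net resolves a user-supplied tun fd to its socket and
 * then drives it with plain sock_sendmsg()/sock_recvmsg().  Holding the
 * file reference for the socket's lifetime is the caller's job, as
 * noted above.
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 */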

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");