#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000
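/* Per-queue flags stored alongside the IFF_* bits in q->flags:
 * TAP_VNET_LE forces little-endian interpretation of the virtio_net header,
 * and TAP_VNET_BE (only available with CONFIG_TUN_VNET_CROSS_LE) forces
 * big-endian interpretation, as selected through the TUNSETVNETLE and
 * TUNSETVNETBE ioctls handled below.
 */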
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
        return q->flags & TAP_VNET_BE ? false :
                virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
        int s = !!(q->flags & TAP_VNET_BE);

        if (put_user(s, sp))
                return -EFAULT;

        return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
        int s;

        if (get_user(s, sp))
                return -EFAULT;

        if (s)
                q->flags |= TAP_VNET_BE;
        else
                q->flags &= ~TAP_VNET_BE;

        return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
        return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
        return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
        return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
        return q->flags & TAP_VNET_LE ||
               tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
        return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
        return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
        .name = "tap",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct tap_queue),
};
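/* Bookkeeping for the tap character device: the allocated major number plus
 * an IDR that maps minor numbers back to their tap_dev (see tap_get_minor()
 * and dev_get_by_tap_minor() below).
 */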
#define TAP_NUM_DEVS (1U << MINORBITS)
struct major_info {
        dev_t major;
        struct idr minor_idr;
        struct mutex minor_lock;
        const char *device_name;
} macvtap_major;

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
        return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->tap becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
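/* Re-enable a queue that is already attached to this tap device but was
 * disabled with IFF_DETACH_QUEUE; the queue keeps its socket and file and is
 * only re-inserted into the taps[] lookup array.
 */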
static int tap_enable_queue(struct tap_dev *tap, struct file *file,
                            struct tap_queue *q)
{
        int err = -EINVAL;

        ASSERT_RTNL();

        if (q->enabled)
                goto out;

        err = 0;
        rcu_assign_pointer(tap->taps[tap->numvtaps], q);
        q->queue_index = tap->numvtaps;
        q->enabled = true;

        tap->numvtaps++;
out:
        return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
                         struct tap_queue *q)
{
        if (tap->numqueues == MAX_TAP_QUEUES)
                return -EBUSY;

        rcu_assign_pointer(q->tap, tap);
        rcu_assign_pointer(tap->taps[tap->numvtaps], q);
        sock_hold(&q->sk);

        q->file = file;
        q->queue_index = tap->numvtaps;
        q->enabled = true;
        file->private_data = q;
        list_add_tail(&q->next, &tap->queue_list);

        tap->numvtaps++;
        tap->numqueues++;

        return 0;
}
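/* Take a queue out of the taps[] lookup array without detaching it from its
 * file: the last enabled queue is moved into the vacated slot so the array
 * stays dense for tap_get_queue().
 */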
static int tap_disable_queue(struct tap_queue *q)
{
        struct tap_dev *tap;
        struct tap_queue *nq;

        ASSERT_RTNL();
        if (!q->enabled)
                return -EINVAL;

        tap = rtnl_dereference(q->tap);

        if (tap) {
                int index = q->queue_index;
                BUG_ON(index >= tap->numvtaps);
                nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
                nq->queue_index = index;

                rcu_assign_pointer(tap->taps[index], nq);
                RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
                q->enabled = false;

                tap->numvtaps--;
        }

        return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding RTNL makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
        struct tap_dev *tap;

        rtnl_lock();
        tap = rtnl_dereference(q->tap);

        if (tap) {
                if (q->enabled)
                        BUG_ON(tap_disable_queue(q));

                tap->numqueues--;
                RCU_INIT_POINTER(q->tap, NULL);
                sock_put(&q->sk);
                list_del_init(&q->next);
        }

        rtnl_unlock();

        synchronize_rcu();
        sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
                                       struct sk_buff *skb)
{
        struct tap_queue *queue = NULL;
        /* Access to taps array is protected by rcu, but access to numvtaps
         * isn't. Below we use it to lookup a queue, but treat it as a hint
         * and validate that the result isn't NULL - in case we are
         * racing against queue removal.
         */
        int numvtaps = ACCESS_ONCE(tap->numvtaps);
        __u32 rxq;

        if (!numvtaps)
                goto out;

        if (numvtaps == 1)
                goto single;

        /* Check if we can use flow to select a queue */
        rxq = skb_get_hash(skb);
        if (rxq) {
                queue = rcu_dereference(tap->taps[rxq % numvtaps]);
                goto out;
        }

        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);

                while (unlikely(rxq >= numvtaps))
                        rxq -= numvtaps;

                queue = rcu_dereference(tap->taps[rxq]);
                goto out;
        }

single:
        queue = rcu_dereference(tap->taps[0]);
out:
        return queue;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
        struct tap_queue *q, *tmp;

        ASSERT_RTNL();
        list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
                list_del_init(&q->next);
                RCU_INIT_POINTER(q->tap, NULL);
                if (q->enabled)
                        tap->numvtaps--;
                tap->numqueues--;
                sock_put(&q->sk);
        }
        BUG_ON(tap->numvtaps);
        BUG_ON(tap->numqueues);
        /* guarantee that any future tap_set_queue will fail */
        tap->numvtaps = MAX_TAP_QUEUES;
}
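/* rx_handler attached to the lower net_device: pick a receive queue and push
 * the skb into its skb_array. Frames that need GSO or checksum completion the
 * reader has not negotiated are segmented or checksummed here before being
 * queued.
 */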
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *dev = skb->dev;
        struct tap_dev *tap;
        struct tap_queue *q;
        netdev_features_t features = TAP_FEATURES;

        tap = tap_dev_get_rcu(dev);
        if (!tap)
                return RX_HANDLER_PASS;

        q = tap_get_queue(tap, skb);
        if (!q)
                return RX_HANDLER_PASS;

        if (__skb_array_full(&q->skb_array))
                goto drop;

        skb_push(skb, ETH_HLEN);

        /* Apply the forward feature mask so that we perform segmentation
         * according to the user's wishes. This only works if VNET_HDR is
         * enabled.
         */
        if (q->flags & IFF_VNET_HDR)
                features |= tap->tap_features;
        if (netif_needs_gso(skb, features)) {
                struct sk_buff *segs = __skb_gso_segment(skb, features, false);

                if (IS_ERR(segs))
                        goto drop;

                if (!segs) {
                        if (skb_array_produce(&q->skb_array, skb))
                                goto drop;
                        goto wake_up;
                }

                consume_skb(skb);
                while (segs) {
                        struct sk_buff *nskb = segs->next;

                        segs->next = NULL;
                        if (skb_array_produce(&q->skb_array, segs)) {
                                kfree_skb(segs);
                                kfree_skb_list(nskb);
                                break;
                        }
                        segs = nskb;
                }
        } else {
                /* If we receive a partial checksum and the tap side
                 * doesn't support checksum offload, compute the checksum.
                 * Note: it doesn't matter which checksum feature to
                 *       check, we either support them all or none.
                 */
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
                    !(features & NETIF_F_CSUM_MASK) &&
                    skb_checksum_help(skb))
                        goto drop;
                if (skb_array_produce(&q->skb_array, skb))
                        goto drop;
        }

wake_up:
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
        return RX_HANDLER_CONSUMED;

drop:
        /* Count errors/drops only here, thus don't care about args. */
        if (tap->count_rx_dropped)
                tap->count_rx_dropped(tap);
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}
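/* Reserve a minor number for a new tap device and remember the tap_dev in the
 * minor IDR so that open() on the character device can find it again.
 */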
int tap_get_minor(struct tap_dev *tap)
{
        int retval = -ENOMEM;

        mutex_lock(&macvtap_major.minor_lock);
        retval = idr_alloc(&macvtap_major.minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL);
        if (retval >= 0) {
                tap->minor = retval;
        } else if (retval == -ENOSPC) {
                netdev_err(tap->dev, "Too many tap devices\n");
                retval = -EINVAL;
        }
        mutex_unlock(&macvtap_major.minor_lock);
        return retval < 0 ? retval : 0;
}

void tap_free_minor(struct tap_dev *tap)
{
        mutex_lock(&macvtap_major.minor_lock);
        if (tap->minor) {
                idr_remove(&macvtap_major.minor_idr, tap->minor);
                tap->minor = 0;
        }
        mutex_unlock(&macvtap_major.minor_lock);
}

static struct tap_dev *dev_get_by_tap_minor(int minor)
{
        struct net_device *dev = NULL;
        struct tap_dev *tap;

        mutex_lock(&macvtap_major.minor_lock);
        tap = idr_find(&macvtap_major.minor_idr, minor);
        if (tap) {
                dev = tap->dev;
                dev_hold(dev);
        }
        mutex_unlock(&macvtap_major.minor_lock);
        return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk) ||
            !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;

        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
        struct tap_queue *q = container_of(sk, struct tap_queue, sk);

        skb_array_cleanup(&q->skb_array);
}
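/* open() on the character device: look up the tap_dev by minor number,
 * allocate a tap_queue (a specialised socket) and attach it as a new queue of
 * that device.
 */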
static int tap_open(struct inode *inode, struct file *file)
{
        struct net *net = current->nsproxy->net_ns;
        struct tap_dev *tap;
        struct tap_queue *q;
        int err = -ENODEV;

        rtnl_lock();
        tap = dev_get_by_tap_minor(iminor(inode));
        if (!tap)
                goto err;

        err = -ENOMEM;
        q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
                                         &tap_proto, 0);
        if (!q)
                goto err;

        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
        q->sock.type = SOCK_RAW;
        q->sock.state = SS_CONNECTED;
        q->sock.file = file;
        q->sock.ops = &tap_socket_ops;
        sock_init_data(&q->sock, &q->sk);
        q->sk.sk_write_space = tap_sock_write_space;
        q->sk.sk_destruct = tap_sock_destruct;
        q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
        q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

        /*
         * So far only KVM virtio_net uses tap; enable zero copy between
         * the guest kernel and the host kernel when the lower device
         * supports zerocopy.
         *
         * The macvlan supports zerocopy iff the lower device supports zero
         * copy, so we don't have to look at the lower device directly.
         */
        if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);

        err = -ENOMEM;
        if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
                goto err_array;

        err = tap_set_queue(tap, file, q);
        if (err)
                goto err_queue;

        dev_put(tap->dev);

        rtnl_unlock();
        return err;

err_queue:
        skb_array_cleanup(&q->skb_array);
err_array:
        sock_put(&q->sk);
err:
        if (tap)
                dev_put(tap->dev);

        rtnl_unlock();
        return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
        struct tap_queue *q = file->private_data;
        tap_put_queue(q);
        return 0;
}
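/* poll() reports readable when the queue's skb_array holds frames and
 * writable according to the socket's send buffer state.
 */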
static unsigned int tap_poll(struct file *file, poll_table *wait)
{
        struct tap_queue *q = file->private_data;
        unsigned int mask = POLLERR;

        if (!q)
                goto out;

        mask = 0;
        poll_wait(file, &q->wq.wait, wait);

        if (!skb_array_empty(&q->skb_array))
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;

out:
        return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
                                            size_t len, size_t linear,
                                            int noblock, int *err)
{
        struct sk_buff *skb;

        /* Under a page? Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   err, 0);
        if (!skb)
                return NULL;

        skb_reserve(skb, prepad);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

        return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
                            struct iov_iter *from, int noblock)
{
        int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
        struct sk_buff *skb;
        struct tap_dev *tap;
        unsigned long total_len = iov_iter_count(from);
        unsigned long len = total_len;
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
        int copylen = 0;
        int depth;
        bool zerocopy = false;
        size_t linear;

        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

                err = -EINVAL;
                if (len < vnet_hdr_len)
                        goto err;
                len -= vnet_hdr_len;

                err = -EFAULT;
                if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
                        goto err;
                iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                     tap16_to_cpu(q, vnet_hdr.csum_start) +
                     tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
                             tap16_to_cpu(q, vnet_hdr.hdr_len))
                        vnet_hdr.hdr_len = cpu_to_tap16(q,
                                 tap16_to_cpu(q, vnet_hdr.csum_start) +
                                 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
                err = -EINVAL;
                if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
                        goto err;
        }

        err = -EINVAL;
        if (unlikely(len < ETH_HLEN))
                goto err;

        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
                struct iov_iter i;

                copylen = vnet_hdr.hdr_len ?
                        tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
                if (copylen > good_linear)
                        copylen = good_linear;
                else if (copylen < ETH_HLEN)
                        copylen = ETH_HLEN;
                linear = copylen;
                i = *from;
                iov_iter_advance(&i, copylen);
                if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
                        zerocopy = true;
        }

        if (!zerocopy) {
                copylen = len;
                linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
                if (linear > good_linear)
                        linear = good_linear;
                else if (linear < ETH_HLEN)
                        linear = ETH_HLEN;
        }

        skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
                            linear, noblock, &err);
        if (!skb)
                goto err;

        if (zerocopy)
                err = zerocopy_sg_from_iter(skb, from);
        else
                err = skb_copy_datagram_from_iter(skb, 0, from, len);

        if (err)
                goto err_kfree;

        skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        if (vnet_hdr_len) {
                err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
                                            tap_is_little_endian(q));
                if (err)
                        goto err_kfree;
        }

        skb_probe_transport_header(skb, ETH_HLEN);

        /* Move network header to the right position for VLAN tagged packets */
        if ((skb->protocol == htons(ETH_P_8021Q) ||
             skb->protocol == htons(ETH_P_8021AD)) &&
            __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
                skb_set_network_header(skb, depth);

        rcu_read_lock();
        tap = rcu_dereference(q->tap);
        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        } else if (m && m->msg_control) {
                struct ubuf_info *uarg = m->msg_control;
                uarg->callback(uarg, false);
        }

        if (tap) {
                skb->dev = tap->dev;
                dev_queue_xmit(skb);
        } else {
                kfree_skb(skb);
        }
        rcu_read_unlock();

        return total_len;

err_kfree:
        kfree_skb(skb);

err:
        rcu_read_lock();
        tap = rcu_dereference(q->tap);
        if (tap && tap->count_tx_dropped)
                tap->count_tx_dropped(tap);
        rcu_read_unlock();

        return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct tap_queue *q = file->private_data;

        return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
                            const struct sk_buff *skb,
                            struct iov_iter *iter)
{
        int ret;
        int vnet_hdr_len = 0;
        int vlan_offset = 0;
        int total;

        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
                vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
                if (iov_iter_count(iter) < vnet_hdr_len)
                        return -EINVAL;

                if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
                                            tap_is_little_endian(q), true))
                        BUG();

                if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
                    sizeof(vnet_hdr))
                        return -EFAULT;

                iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
        }
        total = vnet_hdr_len;
        total += skb->len;

        if (skb_vlan_tag_present(skb)) {
                struct {
                        __be16 h_vlan_proto;
                        __be16 h_vlan_TCI;
                } veth;
                veth.h_vlan_proto = skb->vlan_proto;
                veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
                total += VLAN_HLEN;

                ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
                if (ret || !iov_iter_count(iter))
                        goto done;

                ret = copy_to_iter(&veth, sizeof(veth), iter);
                if (ret != sizeof(veth) || !iov_iter_count(iter))
                        goto done;
        }

        ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
                                     skb->len - vlan_offset);

done:
        return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
                           struct iov_iter *to,
                           int noblock)
{
        DEFINE_WAIT(wait);
        struct sk_buff *skb;
        ssize_t ret = 0;

        if (!iov_iter_count(to))
                return 0;

        while (1) {
                if (!noblock)
                        prepare_to_wait(sk_sleep(&q->sk), &wait,
                                        TASK_INTERRUPTIBLE);

                /* Read frames from the queue */
                skb = skb_array_consume(&q->skb_array);
                if (skb)
                        break;
                if (noblock) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                /* Nothing to read, let's sleep */
                schedule();
        }
        if (!noblock)
                finish_wait(sk_sleep(&q->sk), &wait);

        if (skb) {
                ret = tap_put_user(q, skb, to);
                if (unlikely(ret < 0))
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        }
        return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct tap_queue *q = file->private_data;
        ssize_t len = iov_iter_count(to), ret;

        ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
        return ret;
}
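/* Return the tap_dev behind a queue with a device reference held; callers
 * must hold RTNL and drop the reference with tap_put_tap_dev().
 */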
static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
        struct tap_dev *tap;

        ASSERT_RTNL();
        tap = rtnl_dereference(q->tap);
        if (tap)
                dev_hold(tap->dev);

        return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
        dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
        struct tap_queue *q = file->private_data;
        struct tap_dev *tap;
        int ret;

        tap = tap_get_tap_dev(q);
        if (!tap)
                return -EINVAL;

        if (flags & IFF_ATTACH_QUEUE)
                ret = tap_enable_queue(tap, file, q);
        else if (flags & IFF_DETACH_QUEUE)
                ret = tap_disable_queue(q);
        else
                ret = -EINVAL;

        tap_put_tap_dev(tap);
        return ret;
}
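/* Translate the TUN_F_* offload bits requested via TUNSETOFFLOAD into netdev
 * feature flags for the tap device and its receive path.
 */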
static int set_offload(struct tap_queue *q, unsigned long arg)
{
        struct tap_dev *tap;
        netdev_features_t features;
        netdev_features_t feature_mask = 0;

        tap = rtnl_dereference(q->tap);
        if (!tap)
                return -ENOLINK;

        features = tap->dev->features;

        if (arg & TUN_F_CSUM) {
                feature_mask = NETIF_F_HW_CSUM;

                if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
                        if (arg & TUN_F_TSO_ECN)
                                feature_mask |= NETIF_F_TSO_ECN;
                        if (arg & TUN_F_TSO4)
                                feature_mask |= NETIF_F_TSO;
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }

                if (arg & TUN_F_UFO)
                        feature_mask |= NETIF_F_UFO;
        }

        /* tun/tap driver inverts the usage for TSO offloads, where
         * setting the TSO bit means that the userspace wants to
         * accept TSO frames and turning it off means that user space
         * does not support TSO.
         * For tap, we have to invert it to mean the same thing.
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
        if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;

        /* tap_features are the same as features on tun/tap and
         * reflect user expectations.
         */
        tap->tap_features = feature_mask;
        if (tap->update_features)
                tap->update_features(tap, features);

        return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
                      unsigned long arg)
{
        struct tap_queue *q = file->private_data;
        struct tap_dev *tap;
        void __user *argp = (void __user *)arg;
        struct ifreq __user *ifr = argp;
        unsigned int __user *up = argp;
        unsigned short u;
        int __user *sp = argp;
        struct sockaddr sa;
        int s;
        int ret;

        switch (cmd) {
        case TUNSETIFF:
                /* ignore the name, just look at flags */
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;

                ret = 0;
                if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
                        ret = -EINVAL;
                else
                        q->flags = (q->flags & ~TAP_IFFEATURES) | u;

                return ret;

        case TUNGETIFF:
                rtnl_lock();
                tap = tap_get_tap_dev(q);
                if (!tap) {
                        rtnl_unlock();
                        return -ENOLINK;
                }

                ret = 0;
                u = q->flags;
                if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
                    put_user(u, &ifr->ifr_flags))
                        ret = -EFAULT;
                tap_put_tap_dev(tap);
                rtnl_unlock();
                return ret;

        case TUNSETQUEUE:
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;
                rtnl_lock();
                ret = tap_ioctl_set_queue(file, u);
                rtnl_unlock();
                return ret;

        case TUNGETFEATURES:
                if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
                        return -EFAULT;
                return 0;

        case TUNSETSNDBUF:
                if (get_user(s, sp))
                        return -EFAULT;

                q->sk.sk_sndbuf = s;
                return 0;

        case TUNGETVNETHDRSZ:
                s = q->vnet_hdr_sz;
                if (put_user(s, sp))
                        return -EFAULT;
                return 0;

        case TUNSETVNETHDRSZ:
                if (get_user(s, sp))
                        return -EFAULT;
                if (s < (int)sizeof(struct virtio_net_hdr))
                        return -EINVAL;

                q->vnet_hdr_sz = s;
                return 0;

        case TUNGETVNETLE:
                s = !!(q->flags & TAP_VNET_LE);
                if (put_user(s, sp))
                        return -EFAULT;
                return 0;

        case TUNSETVNETLE:
                if (get_user(s, sp))
                        return -EFAULT;
                if (s)
                        q->flags |= TAP_VNET_LE;
                else
                        q->flags &= ~TAP_VNET_LE;
                return 0;

        case TUNGETVNETBE:
                return tap_get_vnet_be(q, sp);

        case TUNSETVNETBE:
                return tap_set_vnet_be(q, sp);

        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
                            TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;

                rtnl_lock();
                ret = set_offload(q, arg);
                rtnl_unlock();
                return ret;

        case SIOCGIFHWADDR:
                rtnl_lock();
                tap = tap_get_tap_dev(q);
                if (!tap) {
                        rtnl_unlock();
                        return -ENOLINK;
                }
                ret = 0;
                u = tap->dev->type;
                if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
                    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
                    put_user(u, &ifr->ifr_hwaddr.sa_family))
                        ret = -EFAULT;
                tap_put_tap_dev(tap);
                rtnl_unlock();
                return ret;

        case SIOCSIFHWADDR:
                if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
                        return -EFAULT;
                rtnl_lock();
                tap = tap_get_tap_dev(q);
                if (!tap) {
                        rtnl_unlock();
                        return -ENOLINK;
                }
                ret = dev_set_mac_address(tap->dev, &sa);
                tap_put_tap_dev(tap);
                rtnl_unlock();
                return ret;

        default:
                return -EINVAL;
        }
}

#ifdef CONFIG_COMPAT
static long tap_compat_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

const struct file_operations tap_fops = {
        .owner          = THIS_MODULE,
        .open           = tap_open,
        .release        = tap_release,
        .read_iter      = tap_read_iter,
        .write_iter     = tap_write_iter,
        .poll           = tap_poll,
        .llseek         = no_llseek,
        .unlocked_ioctl = tap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = tap_compat_ioctl,
#endif
};

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len)
{
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
        return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len, int flags)
{
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
        int ret;
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;
        ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
        return ret;
}

static int tap_peek_len(struct socket *sock)
{
        struct tap_queue *q = container_of(sock, struct tap_queue,
                                           sock);
        return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
        .sendmsg = tap_sendmsg,
        .recvmsg = tap_recvmsg,
        .peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
        struct tap_queue *q;
        if (file->f_op != &tap_fops)
                return ERR_PTR(-EINVAL);
        q = file->private_data;
        if (!q)
                return ERR_PTR(-EBADFD);
        return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);
501c774c 1141
6fe3faf8 1142int tap_queue_resize(struct tap_dev *tap)
362899b8 1143{
6fe3faf8 1144 struct net_device *dev = tap->dev;
635b8c8e 1145 struct tap_queue *q;
362899b8 1146 struct skb_array **arrays;
6fe3faf8 1147 int n = tap->numqueues;
362899b8
JW
1148 int ret, i = 0;
1149
1150 arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
1151 if (!arrays)
1152 return -ENOMEM;
1153
6fe3faf8 1154 list_for_each_entry(q, &tap->queue_list, next)
362899b8
JW
1155 arrays[i++] = &q->skb_array;
1156
1157 ret = skb_array_resize_multiple(arrays, n,
1158 dev->tx_queue_len, GFP_KERNEL);
1159
1160 kfree(arrays);
1161 return ret;
1162}
ebc05ba7
SG
1163
1164int tap_create_cdev(struct cdev *tap_cdev,
1165 dev_t *tap_major, const char *device_name)
1166{
1167 int err;
1168
1169 err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
1170 if (err)
1171 goto out1;
1172
1173 cdev_init(tap_cdev, &tap_fops);
1174 err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
1175 if (err)
1176 goto out2;
1177
1178 macvtap_major.major = MAJOR(*tap_major);
1179
1180 idr_init(&macvtap_major.minor_idr);
1181 mutex_init(&macvtap_major.minor_lock);
1182
1183 macvtap_major.device_name = device_name;
1184
1185 return 0;
1186
1187out2:
1188 unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
1189out1:
1190 return err;
1191}
1192
1193void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
1194{
1195 cdev_del(tap_cdev);
1196 unregister_chrdev_region(major, TAP_NUM_DEVS);
1197 idr_destroy(&macvtap_major.minor_idr);
1198}