#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

/*
 * A tap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * tap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct tap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
	struct skb_array skb_array;
};

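/*
 * Userspace view of the objects above (an illustrative sketch, not part
 * of this driver): every open() of the per-interface character device
 * creates one tap_queue, so a multiqueue setup is simply several file
 * descriptors on the same node. The node name is hypothetical.
 *
 *	int fd0 = open("/dev/tapNN", O_RDWR);	// queue 0
 *	int fd1 = open("/dev/tapNN", O_RDWR);	// queue 1, same device
 */
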
#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

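/*
 * Descriptive note on the helpers above: every multi-byte
 * virtio_net_hdr field is accessed through them, e.g.
 * tap16_to_cpu(q, vnet_hdr.hdr_len) in tap_get_user() below, so one
 * code path serves legacy native-endian, little-endian (TUNSETVNETLE)
 * and cross-endian (TUNSETVNETBE) peers alike.
 */
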
static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)
struct major_info {
	dev_t major;
	struct idr minor_idr;
	struct mutex minor_lock;
	const char *device_name;
} macvtap_major;

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct macvlan_dev *tap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

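/*
 * A minimal sketch of the access pattern these rules imply (both
 * variants appear verbatim in the functions below):
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);	// may be NULL after unlink
 *	if (vlan)
 *		... use vlan->dev ...
 *	rcu_read_unlock();
 *
 * or, with the rtnl lock held, vlan = rtnl_dereference(q->vlan).
 */
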
static int tap_enable_queue(struct net_device *dev, struct file *file,
			    struct tap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct net_device *dev, struct file *file,
			 struct tap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct macvlan_dev *vlan;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue from the flow hash of this packet; if no hash is
 * available, fall back to the rx queue recorded on the skb, and
 * finally to the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct net_device *dev,
				       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct tap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

single:
	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

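/*
 * Worked example of the mapping above (illustrative): with
 * numvtaps == 4, a flow hash of 0x2a selects taps[0x2a % 4] == taps[2],
 * and a recorded rx queue of 6 is likewise folded to taps[2] by the
 * subtraction loop.
 */
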
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	vlan->numvtaps = MAX_TAP_QUEUES;
}

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = tap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = tap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check; we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

int tap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&macvtap_major.minor_lock);
	retval = idr_alloc(&macvtap_major.minor_idr, vlan, 1, TAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(vlan->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&macvtap_major.minor_lock);
	return retval < 0 ? retval : 0;
}

void tap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&macvtap_major.minor_lock);
	if (vlan->minor) {
		idr_remove(&macvtap_major.minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&macvtap_major.minor_lock);
}

static struct net_device *dev_get_by_tap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&macvtap_major.minor_lock);
	vlan = idr_find(&macvtap_major.minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&macvtap_major.minor_lock);
	return dev;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_tap_minor(iminor(inode));
	if (!dev)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses tap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = tap_set_queue(dev, file, q);
	if (err)
		goto err_queue;

	dev_put(dev);

	rtnl_unlock();
	return err;

err_queue:
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;

	tap_put_queue(q);
	return 0;
}

static unsigned int tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (m && m->msg_control) {
		struct ubuf_info *uarg = m->msg_control;
		uarg->callback(uarg, false);
	}

	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

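/*
 * Illustrative userspace counterpart to tap_get_user() (a sketch, not
 * part of this driver): with IFF_VNET_HDR set, each write must carry a
 * struct virtio_net_hdr followed by the Ethernet frame. "fd", "frame"
 * and "frame_len" are hypothetical.
 *
 *	struct virtio_net_hdr hdr = { 0 };	// no offloads requested
 *	struct iovec iov[2] = {
 *		{ &hdr, sizeof(hdr) },
 *		{ frame, frame_len },		// starts with dest MAC
 *	};
 *	writev(fd, iov, 2);
 */
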
static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct macvlan_dev *tap_get_vlan(struct tap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void tap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = tap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

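/*
 * Worked example for set_offload() (illustrative): TUNSETOFFLOAD with
 * arg == TUN_F_CSUM | TUN_F_TSO4 yields feature_mask ==
 * NETIF_F_HW_CSUM | NETIF_F_TSO; because a TSO bit is set, GRO/LRO stay
 * enabled and userspace may receive TSO frames. With arg == TUN_F_CSUM
 * alone, RX_OFFLOADS is cleared, so such frames are segmented before
 * they reach the queue.
 */
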
/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = tap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		vlan = tap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = vlan->dev->type;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		vlan = tap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(vlan->dev, &sa);
		tap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

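/*
 * Illustrative userspace session against the ioctl interface above
 * (a sketch, not part of this driver; the node name is hypothetical):
 *
 *	int fd = open("/dev/tapNN", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI |
 *					  IFF_VNET_HDR };
 *	ioctl(fd, TUNSETIFF, &ifr);		// flags only, name ignored
 *	int sz = sizeof(struct virtio_net_hdr);
 *	ioctl(fd, TUNSETVNETHDRSZ, &sz);
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;	// park this queue
 *	ioctl(fd, TUNSETQUEUE, &ifr);
 */
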
#ifdef CONFIG_COMPAT
static long tap_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tap_compat_ioctl,
#endif
};

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);

	return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket; it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);

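/*
 * Sketch of a kernel consumer of tap_get_socket() (illustrative;
 * vhost-net is the expected user of this export). "file" and "msg" are
 * assumed to be set up by the caller:
 *
 *	struct socket *sock = tap_get_socket(file);
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	err = sock_sendmsg(sock, &msg);			// inject a frame
 *	err = sock_recvmsg(sock, &msg, MSG_DONTWAIT);	// pull a queued frame
 */
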
int tap_queue_resize(struct macvlan_dev *vlan)
{
	struct net_device *dev = vlan->dev;
	struct tap_queue *q;
	struct skb_array **arrays;
	int n = vlan->numqueues;
	int ret, i = 0;

	arrays = kmalloc(sizeof(*arrays) * n, GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	list_for_each_entry(q, &vlan->queue_list, next)
		arrays[i++] = &q->skb_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}

int tap_create_cdev(struct cdev *tap_cdev,
		    dev_t *tap_major, const char *device_name)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_major.major = MAJOR(*tap_major);

	idr_init(&macvtap_major.minor_idr);
	mutex_init(&macvtap_major.minor_lock);

	macvtap_major.device_name = device_name;

	return 0;

out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	idr_destroy(&macvtap_major.minor_idr);
}
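
/*
 * Sketch of module-side registration with the helpers above
 * (illustrative; variable names are hypothetical, but this mirrors how
 * the macvtap module is expected to use the API):
 *
 *	static struct cdev my_cdev;
 *	static dev_t my_major;
 *
 *	err = tap_create_cdev(&my_cdev, &my_major, "macvtap");
 *	...
 *	tap_destroy_cdev(my_major, &my_cdev);
 */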