#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 *
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

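/*
 * Requires RTNL. Make a previously disabled queue visible again by
 * appending it to the end of the taps[] array.
 */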
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

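/*
 * Requires RTNL. Take a queue out of the taps[] array without detaching
 * it from the device: the last enabled queue is moved into the vacated
 * slot so that the array stays densely packed.
 */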
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the packet's flow hash. If the skb carries no
 * hash, fall back to the rx queue it was received on; if that fails too,
 * use the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

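/*
 * rx_handler, called (under rcu_read_lock()) for every frame that the
 * macvlan layer delivers to this macvtap device: pick a queue and append
 * the skb to its socket receive queue, segmenting GSO frames that the
 * reader cannot accept.
 */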
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

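/*
 * Allocate a character device minor for this macvlan_dev. Minors start
 * at 1, so a value of 0 in vlan->minor means "no minor assigned".
 */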
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

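/*
 * Look up the macvtap net_device behind a character device minor and
 * take a reference on it; the caller must dev_put() it when done.
 */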
static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind = "macvtap",
	.setup = macvtap_setup,
	.newlink = macvtap_newlink,
	.dellink = macvtap_dellink,
};


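/*
 * sk->sk_write_space callback, installed in macvtap_open(): wake up
 * poll()ers waiting for POLLOUT once the socket becomes writeable again.
 */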
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports zerocopy.
	 *
	 * Macvlan supports zerocopy iff the lower device supports zero copy,
	 * so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

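/*
 * Allocate an skb with @prepad bytes of headroom, @linear bytes in the
 * linear area and the remainder of @len as paged data.
 */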
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;
	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
				     current->comm);
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

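/*
 * Fill a virtio_net_hdr from the skb's GSO and checksum state for
 * delivery to user space.
 */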
static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		if (vlan_tx_tag_present(skb))
			vnet_hdr->csum_start += VLAN_HLEN;
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (vnet_hdr.hdr_len > good_linear)
			linear = good_linear;
		else
			linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else {
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (vlan_tx_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

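/*
 * Read one frame: unless noblock is set, wait (interruptibly) until a
 * frame is queued on the socket, then copy a single skb to the iovec
 * and free it.
 */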
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       const struct iovec *iv, unsigned long segs,
			       unsigned long len,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;
	struct iov_iter iter;

	while (len) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (!skb) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		iov_iter_init(&iter, READ, iv, segs, len);
		ret = macvtap_put_user(q, skb, &iter);
		kfree_skb(skb);
		break;
	}

	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iv, count, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
out:
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

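/*
 * Backend for TUNSETQUEUE: re-enable (IFF_ATTACH_QUEUE) or disable
 * (IFF_DETACH_QUEUE) this queue on its macvlan device.
 */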
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* The tun/tap driver inverts the usage for TSO offloads: setting
	 * a TSO bit means that user space wants to accept TSO frames, and
	 * turning it off means that user space does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

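/*
 * Typical user-space usage (a minimal sketch, not part of this driver;
 * the device node name and ifindex below are only examples):
 *
 *	int fd = open("/dev/tap5", O_RDWR);	// 5 == ifindex of the macvtap
 *	struct ifreq ifr = {
 *		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR,
 *	};
 *	ioctl(fd, TUNSETIFF, &ifr);
 *	// read()/write() now carry raw ethernet frames, each prefixed by a
 *	// struct virtio_net_hdr while IFF_VNET_HDR is set (the default,
 *	// see macvtap_open()).
 */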
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner = THIS_MODULE,
	.open = macvtap_open,
	.release = macvtap_release,
	.aio_read = macvtap_aio_read,
	.aio_write = macvtap_aio_write,
	.poll = macvtap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = macvtap_compat_ioctl,
#endif
};

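/*
 * Socket interface, used by in-kernel consumers (e.g. vhost-net) that
 * obtain the queue's socket via macvtap_get_socket() below.
 */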
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
				m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, m->msg_iov, m->msg_iovlen, total_len,
			      flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

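/*
 * Netdevice notifier: create the tap<ifindex> character device node when
 * a macvtap link registers and remove it again on unregister.
 */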
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call = macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");