/*
 * INET 802.1Q VLAN
 * Ethernet-type device handling.
 *
 * Authors:     Ben Greear <greearb@candelatech.com>
 *              Please send support related email to: netdev@vger.kernel.org
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *              Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *              Correct all the locking - David S. Miller <davem@redhat.com>;
 *              Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <asm/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

int vlan_net_id __read_mostly;

/* Our listing of VLAN group(s) */
static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;
static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";

static struct packet_type vlan_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_8021Q),
        .func = vlan_skb_recv, /* VLAN receive method */
};

/* End of global variables definitions. */

static inline unsigned int vlan_grp_hashfn(unsigned int idx)
{
        return ((idx >> VLAN_GRP_HASH_SHIFT) ^ idx) & VLAN_GRP_HASH_MASK;
}
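
/*
 * Worked example (editorial, not in the original source): assuming the
 * constants from this tree's vlan.h -- VLAN_GRP_HASH_SHIFT == 5, hence
 * VLAN_GRP_HASH_MASK == 31 -- a real_dev with ifindex 35 hashes to
 * ((35 >> 5) ^ 35) & 31 == (1 ^ 35) & 31 == 34 & 31 == 2, i.e. bucket 2
 * of vlan_group_hash[].  Check vlan.h for the authoritative values.
 */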

/* Must be invoked with RCU read lock (no preempt) */
static struct vlan_group *__vlan_find_group(struct net_device *real_dev)
{
        struct vlan_group *grp;
        struct hlist_node *n;
        int hash = vlan_grp_hashfn(real_dev->ifindex);

        hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) {
                if (grp->real_dev == real_dev)
                        return grp;
        }

        return NULL;
}

/* Find the protocol handler.  Assumes VID < VLAN_VID_MASK.
 *
 * Must be invoked with RCU read lock (no preempt)
 */
struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id)
{
        struct vlan_group *grp = __vlan_find_group(real_dev);

        if (grp)
                return vlan_group_get_device(grp, vlan_id);

        return NULL;
}

static void vlan_group_free(struct vlan_group *grp)
{
        int i;

        for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
                kfree(grp->vlan_devices_arrays[i]);
        kfree(grp);
}

static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
{
        struct vlan_group *grp;

        grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
        if (!grp)
                return NULL;

        grp->real_dev = real_dev;
        hlist_add_head_rcu(&grp->hlist,
                           &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
        return grp;
}

static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
{
        struct net_device **array;
        unsigned int size;

        ASSERT_RTNL();

        array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
        if (array != NULL)
                return 0;

        size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
        array = kzalloc(size, GFP_KERNEL);
        if (array == NULL)
                return -ENOBUFS;

        vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
        return 0;
}
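
/*
 * Worked example (editorial, not in the original source): struct vlan_group
 * keeps its per-VID device pointers in VLAN_GROUP_ARRAY_SPLIT_PARTS lazily
 * allocated chunks of VLAN_GROUP_ARRAY_PART_LEN entries each.  Assuming the
 * usual values (4096 VIDs split into 8 parts, so PART_LEN == 512), adding
 * VID 1005 preallocates part 1005 / 512 == 1, and vlan_group_get_device()
 * later finds the device at slot 1005 % 512 == 493 of that part.  See
 * if_vlan.h for the authoritative constants.
 */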

static void vlan_rcu_free(struct rcu_head *rcu)
{
        vlan_group_free(container_of(rcu, struct vlan_group, rcu));
}

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        struct net_device *real_dev = vlan->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;

        ASSERT_RTNL();

        grp = __vlan_find_group(real_dev);
        BUG_ON(!grp);

        /* Take it out of our own structures, but be sure to interlock with
         * HW accelerating devices or SW vlan input packet processing.
         */
        if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
                ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);

        grp->nr_vlans--;

        vlan_group_set_device(grp, vlan_id, NULL);
        if (!grp->killall)
                synchronize_net();

        unregister_netdevice_queue(dev, head);

        /* If the group is now empty, kill off the group. */
        if (grp->nr_vlans == 0) {
                vlan_gvrp_uninit_applicant(real_dev);

                if (real_dev->features & NETIF_F_HW_VLAN_RX)
                        ops->ndo_vlan_rx_register(real_dev, NULL);

                hlist_del_rcu(&grp->hlist);

                /* Free the group, after all CPUs are done. */
                call_rcu(&grp->rcu, vlan_rcu_free);
        }

        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
}

int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
{
        const char *name = real_dev->name;
        const struct net_device_ops *ops = real_dev->netdev_ops;

        if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
                pr_info("8021q: VLANs not supported on %s\n", name);
                return -EOPNOTSUPP;
        }

        if ((real_dev->features & NETIF_F_HW_VLAN_RX) && !ops->ndo_vlan_rx_register) {
                pr_info("8021q: device %s has buggy VLAN hw accel\n", name);
                return -EOPNOTSUPP;
        }

        if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
            (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
                pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
                return -EOPNOTSUPP;
        }

        if (__find_vlan_dev(real_dev, vlan_id) != NULL)
                return -EEXIST;

        return 0;
}

int register_vlan_dev(struct net_device *dev)
{
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        struct net_device *real_dev = vlan->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        u16 vlan_id = vlan->vlan_id;
        struct vlan_group *grp, *ngrp = NULL;
        int err;

        grp = __vlan_find_group(real_dev);
        if (!grp) {
                ngrp = grp = vlan_group_alloc(real_dev);
                if (!grp)
                        return -ENOBUFS;
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
                        goto out_free_group;
        }

        err = vlan_group_prealloc_vid(grp, vlan_id);
        if (err < 0)
                goto out_uninit_applicant;

        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_applicant;

        /* Account for reference in struct vlan_dev_info */
        dev_hold(real_dev);

        netif_stacked_transfer_operstate(real_dev, dev);
        linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

        /* So, got the sucker initialized, now let's place
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan_id, dev);
        grp->nr_vlans++;

        if (ngrp && real_dev->features & NETIF_F_HW_VLAN_RX)
                ops->ndo_vlan_rx_register(real_dev, ngrp);
        if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
                ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);

        return 0;

out_uninit_applicant:
        if (ngrp)
                vlan_gvrp_uninit_applicant(real_dev);
out_free_group:
        if (ngrp) {
                hlist_del_rcu(&ngrp->hlist);
                /* Free the group, after all CPUs are done. */
                call_rcu(&ngrp->rcu, vlan_rcu_free);
        }
        return err;
}
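
/*
 * Editorial note: register_vlan_dev() above is the common registration path.
 * It is reached from the ioctl-driven register_vlan_device() below and, in
 * trees of this vintage, from the rtnetlink path (vlan_newlink() in
 * net/8021q/vlan_netlink.c, wired up through the vlan_link_ops referenced
 * below) -- worth verifying against vlan_netlink.c in this tree.
 */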

/*  Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 *  Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
        struct net_device *new_dev;
        struct net *net = dev_net(real_dev);
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        char name[IFNAMSIZ];
        int err;

        if (vlan_id >= VLAN_VID_MASK)
                return -ERANGE;

        err = vlan_check_real_dev(real_dev, vlan_id);
        if (err < 0)
                return err;

        /* Gotta set up the fields for the device. */
        switch (vn->name_type) {
        case VLAN_NAME_TYPE_RAW_PLUS_VID:
                /* name will look like:  eth1.0005 */
                snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like:  vlan5
                 */
                snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
                break;
        case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like:  eth0.5
                 */
                snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID:
                /* Put our vlan.VID in the name.
                 * Name will look like:  vlan0005
                 */
        default:
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }

        new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
                                  vlan_setup, real_dev->num_tx_queues);

        if (new_dev == NULL)
                return -ENOBUFS;

        new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
        dev_net_set(new_dev, net);
        /* need 4 bytes for extra VLAN header info,
         * hope the underlying device can handle it.
         */
        new_dev->mtu = real_dev->mtu;

        vlan_dev_info(new_dev)->vlan_id = vlan_id;
        vlan_dev_info(new_dev)->real_dev = real_dev;
        vlan_dev_info(new_dev)->dent = NULL;
        vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;

        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev);
        if (err < 0)
                goto out_free_newdev;

        return 0;

out_free_newdev:
        free_netdev(new_dev);
        return err;
}

static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
{
        struct vlan_dev_info *vlan = vlan_dev_info(vlandev);

        /* May be called without an actual change */
        if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
                return;

        /* vlan address was different from the old address and is equal to
         * the new address */
        if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
            !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
                dev_unicast_delete(dev, vlandev->dev_addr);

        /* vlan address was equal to the old address and is different from
         * the new address */
        if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
            compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
                dev_unicast_add(dev, vlandev->dev_addr);

        memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
}

static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
{
        unsigned long old_features = vlandev->features;

        vlandev->features &= ~dev->vlan_features;
        vlandev->features |= dev->features & dev->vlan_features;
        vlandev->gso_max_size = dev->gso_max_size;
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif
        vlandev->real_num_tx_queues = dev->real_num_tx_queues;
        BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);

        if (old_features != vlandev->features)
                netdev_features_change(vlandev);
}
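
/*
 * Worked example (editorial, not in the original source): the two feature
 * lines above clear every bit the real device lists in vlan_features and
 * then copy back only those bits currently set in dev->features.  E.g. if
 * dev->vlan_features == NETIF_F_SG | NETIF_F_HW_CSUM but dev->features
 * currently has only NETIF_F_SG enabled, the vlan device ends up with
 * NETIF_F_SG and loses NETIF_F_HW_CSUM; any bits the vlan device carries
 * outside vlan_features are left untouched.
 */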

static void __vlan_device_event(struct net_device *dev, unsigned long event)
{
        switch (event) {
        case NETDEV_CHANGENAME:
                vlan_proc_rem_dev(dev);
                if (vlan_proc_add_dev(dev) < 0)
                        pr_warning("8021q: failed to change proc name for %s\n",
                                   dev->name);
                break;
        case NETDEV_REGISTER:
                if (vlan_proc_add_dev(dev) < 0)
                        pr_warning("8021q: failed to add proc entry for %s\n",
                                   dev->name);
                break;
        case NETDEV_UNREGISTER:
                vlan_proc_rem_dev(dev);
                break;
        }
}

static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                             void *ptr)
{
        struct net_device *dev = ptr;
        struct vlan_group *grp;
        int i, flgs;
        struct net_device *vlandev;
        struct vlan_dev_info *vlan;
        LIST_HEAD(list);

        if (is_vlan_dev(dev))
                __vlan_device_event(dev, event);

        grp = __vlan_find_group(dev);
        if (!grp)
                goto out;

        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
         */

        switch (event) {
        case NETDEV_CHANGE:
                /* Propagate real device state to vlan devices */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        netif_stacked_transfer_operstate(dev, vlandev);
                }
                break;

        case NETDEV_CHANGEADDR:
                /* Adjust unicast filters on underlying device */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan_sync_address(dev, vlandev);
                }
                break;

        case NETDEV_CHANGEMTU:
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        if (vlandev->mtu <= dev->mtu)
                                continue;

                        dev_set_mtu(vlandev, dev->mtu);
                }
                break;

        case NETDEV_FEAT_CHANGE:
                /* Propagate the real device's feature changes to its vlan devices */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        vlan_transfer_features(dev, vlandev);
                }

                break;

        case NETDEV_DOWN:
                /* Put all VLANs for this dev in the down state too. */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan = vlan_dev_info(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs & ~IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
                }
                break;

        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too. */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        flgs = vlandev->flags;
                        if (flgs & IFF_UP)
                                continue;

                        vlan = vlan_dev_info(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
                }
                break;

        case NETDEV_UNREGISTER:
                /* Delete all VLANs for this dev. */
                grp->killall = 1;

                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;

                        /* unregistration of last vlan destroys group, abort
                         * afterwards */
                        if (grp->nr_vlans == 1)
                                i = VLAN_GROUP_ARRAY_LEN;

                        unregister_vlan_dev(vlandev, &list);
                }
                unregister_netdevice_many(&list);
                break;
        }

out:
        return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
        .notifier_call = vlan_device_event,
};

/*
 *      VLAN IOCTL handler.
 *      o execute requested action or pass command to the device driver
 *      arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
        int err;
        struct vlan_ioctl_args args;
        struct net_device *dev = NULL;

        if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
                return -EFAULT;

        /* Null terminate this sucker, just in case. */
        args.device1[23] = 0;
        args.u.device2[23] = 0;

        rtnl_lock();

        switch (args.cmd) {
        case SET_VLAN_INGRESS_PRIORITY_CMD:
        case SET_VLAN_EGRESS_PRIORITY_CMD:
        case SET_VLAN_FLAG_CMD:
        case ADD_VLAN_CMD:
        case DEL_VLAN_CMD:
        case GET_VLAN_REALDEV_NAME_CMD:
        case GET_VLAN_VID_CMD:
                err = -ENODEV;
                dev = __dev_get_by_name(net, args.device1);
                if (!dev)
                        goto out;

                err = -EINVAL;
                if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
                        goto out;
        }

        switch (args.cmd) {
        case SET_VLAN_INGRESS_PRIORITY_CMD:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                vlan_dev_set_ingress_priority(dev,
                                              args.u.skb_priority,
                                              args.vlan_qos);
                err = 0;
                break;

        case SET_VLAN_EGRESS_PRIORITY_CMD:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                err = vlan_dev_set_egress_priority(dev,
                                                   args.u.skb_priority,
                                                   args.vlan_qos);
                break;

        case SET_VLAN_FLAG_CMD:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                err = vlan_dev_change_flags(dev,
                                            args.vlan_qos ? args.u.flag : 0,
                                            args.u.flag);
                break;

        case SET_VLAN_NAME_TYPE_CMD:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                if ((args.u.name_type >= 0) &&
                    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
                        struct vlan_net *vn;

                        vn = net_generic(net, vlan_net_id);
                        vn->name_type = args.u.name_type;
                        err = 0;
                } else {
                        err = -EINVAL;
                }
                break;

        case ADD_VLAN_CMD:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                err = register_vlan_device(dev, args.u.VID);
                break;

        case DEL_VLAN_CMD:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                unregister_vlan_dev(dev, NULL);
                err = 0;
                break;

        case GET_VLAN_REALDEV_NAME_CMD:
                err = 0;
                vlan_dev_get_realdev_name(dev, args.u.device2);
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
                        err = -EFAULT;
                break;

        case GET_VLAN_VID_CMD:
                err = 0;
                args.u.VID = vlan_dev_vlan_id(dev);
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
                        err = -EFAULT;
                break;

        default:
                err = -EOPNOTSUPP;
                break;
        }
out:
        rtnl_unlock();
        return err;
}
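
/*
 * Usage sketch (editorial, not in the original source): the handler above is
 * reached from userspace via the SIOCSIFVLAN/SIOCGIFVLAN socket ioctls, the
 * interface used by the vconfig(8) tool.  A minimal caller, assuming the
 * UAPI definitions from <linux/if_vlan.h> and <linux/sockios.h>, would look
 * roughly like this (error handling omitted):
 *
 *      struct vlan_ioctl_args req;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&req, 0, sizeof(req));
 *      req.cmd = ADD_VLAN_CMD;
 *      strncpy(req.device1, "eth0", sizeof(req.device1) - 1);
 *      req.u.VID = 5;
 *      ioctl(fd, SIOCSIFVLAN, &req);   -> creates eth0.5 (with the default name_type)
 *
 * The resulting device name depends on the per-namespace name_type set via
 * SET_VLAN_NAME_TYPE_CMD, as handled in register_vlan_device() above.
 */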

static int __net_init vlan_init_net(struct net *net)
{
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        int err;

        vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

        err = vlan_proc_init(net);

        return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
        vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
        .init = vlan_init_net,
        .exit = vlan_exit_net,
        .id   = &vlan_net_id,
        .size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
{
        int err;

        pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
        pr_info("All bugs added by %s\n", vlan_buggyright);

        err = register_pernet_subsys(&vlan_net_ops);
        if (err < 0)
                goto err0;

        err = register_netdevice_notifier(&vlan_notifier_block);
        if (err < 0)
                goto err2;

        err = vlan_gvrp_init();
        if (err < 0)
                goto err3;

        err = vlan_netlink_init();
        if (err < 0)
                goto err4;

        dev_add_pack(&vlan_packet_type);
        vlan_ioctl_set(vlan_ioctl_handler);
        return 0;

err4:
        vlan_gvrp_uninit();
err3:
        unregister_netdevice_notifier(&vlan_notifier_block);
err2:
        unregister_pernet_subsys(&vlan_net_ops);
err0:
        return err;
}

static void __exit vlan_cleanup_module(void)
{
        unsigned int i;

        vlan_ioctl_set(NULL);
        vlan_netlink_fini();

        unregister_netdevice_notifier(&vlan_notifier_block);

        dev_remove_pack(&vlan_packet_type);

        /* This table must be empty if there are no module references left. */
        for (i = 0; i < VLAN_GRP_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&vlan_group_hash[i]));

        unregister_pernet_subsys(&vlan_net_ops);
        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);