]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/bridge/br_vlan.c
ipv6: Check ip6_find_1stfragopt() return value properly.
[mirror_ubuntu-zesty-kernel.git] / net / bridge / br_vlan.c
1 #include <linux/kernel.h>
2 #include <linux/netdevice.h>
3 #include <linux/rtnetlink.h>
4 #include <linux/slab.h>
5 #include <net/switchdev.h>
6
7 #include "br_private.h"
8
9 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
10 const void *ptr)
11 {
12 const struct net_bridge_vlan *vle = ptr;
13 u16 vid = *(u16 *)arg->key;
14
15 return vle->vid != vid;
16 }
17
/* Parameters for the per-bridge/per-port vlan hash table, keyed by the
 * 16-bit vid field of struct net_bridge_vlan.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,		/* at most one entry per possible vid */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
28
/* Look up a vlan entry by vid in the given hash table; NULL if absent. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
33
/* Set vid as the group's pvid (no-op if already set).
 * The smp_wmb() orders the writes that set up the vlan entry before the
 * pvid store becomes visible to lockless readers of vg->pvid
 * (presumably br_get_pvid() on the fast path — no paired read barrier is
 * visible in this file; TODO confirm against br_private.h).
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}
42
/* Clear the group's pvid, but only if it currently equals vid.
 * Mirrors __vlan_add_pvid(): smp_wmb() orders prior writes ahead of the
 * pvid store for lockless readers.
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
51
/* Apply user-visible vlan flags to an existing entry: update the group's
 * pvid to track the PVID flag and set/clear the UNTAGGED flag on the entry.
 */
static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;

	/* Master entries hang off the bridge, others off their port. */
	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v->vid);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}
71
/* Program a single vid into the underlying device's vlan filter.
 * Tries the switchdev object first; if the driver does not support it,
 * falls back to the software 8021q filter (vlan_vid_add).
 * Returns 0 on success or a negative errno.
 */
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = switchdev_port_obj_add(dev, &v.obj);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, vid);
	return err;
}
92
/* Insert v into its group's vlan_list, keeping the list sorted by vid
 * in ascending order. Walks backwards from the tail and inserts after
 * the first entry whose vid is <= v->vid (RCU-safe insertion).
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;	/* keep scanning toward the head */
		else
			break;		/* insertion point found */
	}
	list_add_rcu(&v->vlist, hpos);
}
114
/* Unlink v from its group's sorted vlan_list (RCU-safe removal;
 * the caller is responsible for freeing v after a grace period).
 */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
119
/* Remove a single vid from the underlying device's vlan filter.
 * Tries the switchdev object first; if unsupported, falls back to the
 * software 8021q filter (vlan_vid_del, which cannot fail here).
 * Returns 0 on success or a negative errno from the switchdev op.
 */
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = switchdev_port_obj_del(dev, &v.obj);
	if (err == -EOPNOTSUPP) {
		vlan_vid_del(dev, br->vlan_proto, vid);
		return 0;
	}
	return err;
}
141
/* Returns a master vlan, if it didn't exist it gets created. In all cases a
 * a reference is taken to the master vlan before returning.
 * Returns NULL if the global context could not be created or found.
 * Must be called under RTNL (creation goes through br_vlan_add()).
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		/* must exist right after a successful add */
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}
164
165 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
166 {
167 struct net_bridge_vlan *v;
168
169 v = container_of(rcu, struct net_bridge_vlan, rcu);
170 WARN_ON(!br_vlan_is_master(v));
171 free_percpu(v->stats);
172 v->stats = NULL;
173 kfree(v);
174 }
175
/* Drop a reference on a master vlan entry; when the last reference is
 * released the entry is removed from the bridge's hash and list and
 * freed after an RCU grace period. A non-master argument is ignored.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		/* stats are owned by the master, freed in the RCU callback */
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
191
192 /* This is the shared VLAN add function which works for both ports and bridge
193 * devices. There are four possible calls to this function in terms of the
194 * vlan entry type:
195 * 1. vlan is being added on a port (no master flags, global entry exists)
196 * 2. vlan is being added on a bridge (both master and brentry flags)
197 * 3. vlan is being added on a port, but a global entry didn't exist which
198 * is being created right now (master flag set, brentry flag unset), the
199 * global entry is used for global per-vlan features, but not for filtering
200 * 4. same as 3 but with both master and brentry flags set so the entry
201 * will be used for filtering in both the port and the bridge
202 */
203 static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
204 {
205 struct net_bridge_vlan *masterv = NULL;
206 struct net_bridge_port *p = NULL;
207 struct net_bridge_vlan_group *vg;
208 struct net_device *dev;
209 struct net_bridge *br;
210 int err;
211
212 if (br_vlan_is_master(v)) {
213 br = v->br;
214 dev = br->dev;
215 vg = br_vlan_group(br);
216 } else {
217 p = v->port;
218 br = p->br;
219 dev = p->dev;
220 vg = nbp_vlan_group(p);
221 }
222
223 if (p) {
224 /* Add VLAN to the device filter if it is supported.
225 * This ensures tagged traffic enters the bridge when
226 * promiscuous mode is disabled by br_manage_promisc().
227 */
228 err = __vlan_vid_add(dev, br, v->vid, flags);
229 if (err)
230 goto out;
231
232 /* need to work on the master vlan too */
233 if (flags & BRIDGE_VLAN_INFO_MASTER) {
234 err = br_vlan_add(br, v->vid, flags |
235 BRIDGE_VLAN_INFO_BRENTRY);
236 if (err)
237 goto out_filt;
238 }
239
240 masterv = br_vlan_get_master(br, v->vid);
241 if (!masterv)
242 goto out_filt;
243 v->brvlan = masterv;
244 v->stats = masterv->stats;
245 }
246
247 /* Add the dev mac and count the vlan only if it's usable */
248 if (br_vlan_should_use(v)) {
249 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
250 if (err) {
251 br_err(br, "failed insert local address into bridge forwarding table\n");
252 goto out_filt;
253 }
254 vg->num_vlans++;
255 }
256
257 err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
258 br_vlan_rht_params);
259 if (err)
260 goto out_fdb_insert;
261
262 __vlan_add_list(v);
263 __vlan_add_flags(v, flags);
264 out:
265 return err;
266
267 out_fdb_insert:
268 if (br_vlan_should_use(v)) {
269 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
270 vg->num_vlans--;
271 }
272
273 out_filt:
274 if (p) {
275 __vlan_vid_del(dev, br, v->vid);
276 if (masterv) {
277 br_vlan_put_master(masterv);
278 v->brvlan = NULL;
279 }
280 }
281
282 goto out;
283 }
284
/* Shared VLAN delete for both port and bridge entries. Clears the pvid if
 * it pointed at this vid, removes the device filter entry for port vlans,
 * and drops a reference on the master entry. Port entries are removed from
 * their hash/list here; a master entry itself is only removed once its
 * refcount reaches zero inside br_vlan_put_master().
 * Returns 0 on success or a negative errno from the device filter removal.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	/* only entries that were counted for filtering are uncounted */
	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		/* port vlan: remove it now, its stats belong to the master */
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
323
/* Free a vlan group after all entries were deleted; warns if the
 * list is unexpectedly non-empty. Callers must have waited for an RCU
 * grace period since unpublishing the group pointer.
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}
330
/* Delete every vlan entry in the group (and its pvid). Uses the _safe
 * iterator because __vlan_del() unlinks entries from vlan_list.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
339
/* Egress vlan handling: strip the tag for untagged-egress vlans, account
 * tx stats, and drop frames whose vlan is not configured on the egress
 * group. Returns the (possibly modified) skb, or NULL if it was dropped.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* clear the hwaccel tag so the frame leaves untagged */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;
out:
	return skb;
}
384
385 /* Called under RCU */
386 static bool __allowed_ingress(const struct net_bridge *br,
387 struct net_bridge_vlan_group *vg,
388 struct sk_buff *skb, u16 *vid)
389 {
390 struct br_vlan_stats *stats;
391 struct net_bridge_vlan *v;
392 bool tagged;
393
394 BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
395 /* If vlan tx offload is disabled on bridge device and frame was
396 * sent from vlan device on the bridge device, it does not have
397 * HW accelerated vlan tag.
398 */
399 if (unlikely(!skb_vlan_tag_present(skb) &&
400 skb->protocol == br->vlan_proto)) {
401 skb = skb_vlan_untag(skb);
402 if (unlikely(!skb))
403 return false;
404 }
405
406 if (!br_vlan_get_tag(skb, vid)) {
407 /* Tagged frame */
408 if (skb->vlan_proto != br->vlan_proto) {
409 /* Protocol-mismatch, empty out vlan_tci for new tag */
410 skb_push(skb, ETH_HLEN);
411 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
412 skb_vlan_tag_get(skb));
413 if (unlikely(!skb))
414 return false;
415
416 skb_pull(skb, ETH_HLEN);
417 skb_reset_mac_len(skb);
418 *vid = 0;
419 tagged = false;
420 } else {
421 tagged = true;
422 }
423 } else {
424 /* Untagged frame */
425 tagged = false;
426 }
427
428 if (!*vid) {
429 u16 pvid = br_get_pvid(vg);
430
431 /* Frame had a tag with VID 0 or did not have a tag.
432 * See if pvid is set on this port. That tells us which
433 * vlan untagged or priority-tagged traffic belongs to.
434 */
435 if (!pvid)
436 goto drop;
437
438 /* PVID is set on this port. Any untagged or priority-tagged
439 * ingress frame is considered to belong to this vlan.
440 */
441 *vid = pvid;
442 if (likely(!tagged))
443 /* Untagged Frame. */
444 __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
445 else
446 /* Priority-tagged Frame.
447 * At this point, We know that skb->vlan_tci had
448 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
449 * We update only VID field and preserve PCP field.
450 */
451 skb->vlan_tci |= pvid;
452
453 /* if stats are disabled we can avoid the lookup */
454 if (!br->vlan_stats_enabled)
455 return true;
456 }
457 v = br_vlan_find(vg, *vid);
458 if (!v || !br_vlan_should_use(v))
459 goto drop;
460
461 if (br->vlan_stats_enabled) {
462 stats = this_cpu_ptr(v->stats);
463 u64_stats_update_begin(&stats->syncp);
464 stats->rx_bytes += skb->len;
465 stats->rx_packets++;
466 u64_stats_update_end(&stats->syncp);
467 }
468
469 return true;
470
471 drop:
472 kfree_skb(skb);
473 return false;
474 }
475
476 bool br_allowed_ingress(const struct net_bridge *br,
477 struct net_bridge_vlan_group *vg, struct sk_buff *skb,
478 u16 *vid)
479 {
480 /* If VLAN filtering is disabled on the bridge, all packets are
481 * permitted.
482 */
483 if (!br->vlan_enabled) {
484 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
485 return true;
486 }
487
488 return __allowed_ingress(br, vg, skb, vid);
489 }
490
491 /* Called under RCU. */
492 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
493 const struct sk_buff *skb)
494 {
495 const struct net_bridge_vlan *v;
496 u16 vid;
497
498 /* If this packet was not filtered at input, let it pass */
499 if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
500 return true;
501
502 br_vlan_get_tag(skb, &vid);
503 v = br_vlan_find(vg, vid);
504 if (v && br_vlan_should_use(v))
505 return true;
506
507 return false;
508 }
509
510 /* Called under RCU */
/* Called under RCU.
 * Decide whether a source address on this port/vlan should be learned.
 * Resolves the frame's vid (falling back to the port's pvid for untagged
 * or protocol-mismatched frames) and checks it against the port's vlan
 * configuration. *vid is set to the resolved vlan on success.
 */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	/* tag of a foreign protocol is treated as no tag at all */
	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}
540
541 /* Must be protected by RTNL.
542 * Must be called with vid in range from 1 to 4094 inclusive.
543 */
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Add (or update the flags of) a bridge master vlan entry. An existing
 * master kept only for port vlans is promoted to a real bridge entry when
 * BRIDGE_VLAN_INFO_BRENTRY is requested. New entries own their per-cpu
 * stats. Returns 0 on success or a negative errno.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			/* promotion adds the bridge's own reference */
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* the pvid is applied separately via __vlan_add_flags() */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	}

	return ret;
}
597
598 /* Must be protected by RTNL.
599 * Must be called with vid in range from 1 to 4094 inclusive.
600 */
601 int br_vlan_delete(struct net_bridge *br, u16 vid)
602 {
603 struct net_bridge_vlan_group *vg;
604 struct net_bridge_vlan *v;
605
606 ASSERT_RTNL();
607
608 vg = br_vlan_group(br);
609 v = br_vlan_find(vg, vid);
610 if (!v || !br_vlan_is_brentry(v))
611 return -ENOENT;
612
613 br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
614 br_fdb_delete_by_port(br, NULL, vid, 0);
615
616 return __vlan_del(v);
617 }
618
/* Tear down the bridge's vlan group: delete all entries, unpublish the
 * group pointer, wait for RCU readers, then free the group itself.
 * The synchronize_rcu() must precede __vlan_group_free() so no reader
 * still holds the group when its hash table is destroyed.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
631
632 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
633 {
634 if (!vg)
635 return NULL;
636
637 return br_vlan_lookup(&vg->vlan_hash, vid);
638 }
639
640 /* Must be protected by RTNL. */
641 static void recalculate_group_addr(struct net_bridge *br)
642 {
643 if (br->group_addr_set)
644 return;
645
646 spin_lock_bh(&br->lock);
647 if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
648 /* Bridge Group Address */
649 br->group_addr[5] = 0x00;
650 } else { /* vlan_enabled && ETH_P_8021AD */
651 /* Provider Bridge Group Address */
652 br->group_addr[5] = 0x08;
653 }
654 spin_unlock_bh(&br->lock);
655 }
656
657 /* Must be protected by RTNL. */
/* Must be protected by RTNL.
 * Recompute which link-local group addresses must always be forwarded to
 * the CPU. With 802.1ad the bridge's own group address is excluded from
 * the mask so it is not trapped twice.
 */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}
666
/* Enable/disable vlan filtering on the bridge. Propagates the setting to
 * switchdev drivers (ignoring -EOPNOTSUPP) and refreshes promiscuity,
 * group address and forward mask, which all depend on vlan_enabled.
 * Returns 0 on success or the switchdev error.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	/* no-op if the state does not change */
	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
691
/* Sysfs/netlink entry point for toggling vlan filtering; RTNL is held by
 * the callers (presumably — TODO confirm against the sysfs/netlink paths).
 */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
696
/* Switch the bridge's vlan protocol (802.1Q <-> 802.1ad).
 * First programs every configured vid under the new protocol into all
 * port device filters; only if that fully succeeds is the protocol
 * switched and the old-protocol filter entries removed. On failure the
 * partially-added new-protocol entries are rolled back in reverse order.
 * Returns 0 on success or the first vlan_vid_add() error.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	/* group address and forward mask depend on the protocol */
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the partial port first, then all fully-done ports */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
745
746 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
747 {
748 if (val != ETH_P_8021Q && val != ETH_P_8021AD)
749 return -EPROTONOSUPPORT;
750
751 return __br_vlan_set_proto(br, htons(val));
752 }
753
754 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
755 {
756 switch (val) {
757 case 0:
758 case 1:
759 br->vlan_stats_enabled = val;
760 break;
761 default:
762 return -EINVAL;
763 }
764
765 return 0;
766 }
767
768 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
769 {
770 struct net_bridge_vlan *v;
771
772 if (vid != vg->pvid)
773 return false;
774
775 v = br_vlan_lookup(&vg->vlan_hash, vid);
776 if (v && br_vlan_should_use(v) &&
777 (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
778 return true;
779
780 return false;
781 }
782
/* Turn the default pvid off: remove it from the bridge and every port
 * where it is still in its pristine default configuration (user-modified
 * entries are left alone), then record that no default pvid exists.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
801
/* Change the bridge's default pvid to pvid (0 disables it).
 * The new pvid is installed on the bridge and on every port whose current
 * configuration does not conflict with the default (i.e. the old default
 * was untouched and the new vid is not already configured). A per-port
 * bitmap records which entities were changed — bit 0 is the bridge
 * itself, bit p->port_no each port — so a mid-way failure can roll back
 * exactly the entities that were modified.
 * Returns 0 on success or a negative errno (all changes undone).
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 == the bridge device */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port that was already switched */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	/* and undo the bridge-level change if one was made */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
887
/* User entry point for changing the default pvid. Rejects out-of-range
 * values and refuses the change while vlan filtering is enabled (it would
 * silently rewrite active filtering configuration). Returns 0 on success.
 */
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
out:
	return err;
}
909
/* Initialize the bridge's vlan state: allocate and publish the vlan
 * group, default the protocol to 802.1Q, and install vlan 1 as the
 * default pvid bridge entry. Returns 0 on success or a negative errno
 * with everything torn down.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* publish the group before adding the default entry */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
941
/* Initialize a port's vlan state when it joins the bridge: sync the
 * bridge's vlan_filtering setting to the switchdev driver (ignoring
 * -EOPNOTSUPP), allocate and publish the port vlan group, and install
 * the bridge's default pvid on the port if one is set.
 * Returns 0 on success or a negative errno with everything torn down.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* publish the group before adding the default pvid entry */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* the group was published above, so unpublish and wait for readers */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_vlan_enabled:
err_rhtbl:
	kfree(vg);

	goto out;
}
986
987 /* Must be protected by RTNL.
988 * Must be called with vid in range from 1 to 4094 inclusive.
989 */
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Add (or update the flags of) a vlan on a bridge port. For an existing
 * entry only the flags are refreshed, both in software and via switchdev
 * (-EOPNOTSUPP from the driver is tolerated). Returns 0 on success or a
 * negative errno; the vlan struct is freed here on a failed add.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	/* __vlan_add() links the entry and wires up the master/stats */
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
1026
1027 /* Must be protected by RTNL.
1028 * Must be called with vid in range from 1 to 4094 inclusive.
1029 */
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Delete a vlan from a bridge port, flushing the port's local and learned
 * fdb entries for that vid first. Returns -ENOENT if not configured.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}
1044
/* Tear down a port's vlan group: delete all entries, unpublish the group
 * pointer, wait for RCU readers, then free the group (mirrors
 * br_vlan_flush() for the bridge device).
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
1057
/* Aggregate a vlan's per-cpu counters into *stats. Each CPU's counters
 * are read inside a u64_stats fetch/retry loop so a concurrent writer
 * cannot produce a torn 64-bit value on 32-bit hosts.
 */
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}