/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on link speed,
 * using the recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, this must not be called while holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED)
			br_stp_enable_port(p);
	} else {
		if (p->state != BR_STATE_DISABLED)
			br_stp_disable_port(p);
	}
	spin_unlock_bh(&br->lock);
}

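/* Put the port into promiscuous mode and unsync the statically programmed
 * unicast addresses, since hardware address filtering is no longer needed.
 */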
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

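/* Take the port out of promiscuous mode, programming the static fdb
 * addresses into its unicast filter first so that traffic keeps flowing.
 */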
static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promiscuous or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that there is no interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * the promiscuity setting of all the bridge ports.  We are always
 * called under RTNL, so we can skip using RCU primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

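/* Recount the automatic (learning/flooding) ports and, if the count has
 * changed, re-evaluate the promiscuity setting of every port.
 */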
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

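/* kobject release callback: frees the port structure once the last
 * reference to its kobject has been dropped.
 */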
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
};

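/* Final teardown of a port: drop the reference on the underlying device
 * and put the kobject, which frees the port via release_nbp().
 */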
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

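/* Return the largest forwarding headroom required by any port device. */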
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

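/* Propagate a new rx headroom value to every port device and record it on
 * the bridge device itself.
 */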
static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from a bridge is done in two steps
 * via RCU. The first step marks the device as down; that deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from concurrent admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	kfree(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

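/* Create and register a new bridge device in the given network namespace. */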
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}

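/* Destroy the named bridge device.  It must exist, actually be a bridge
 * and already be administratively down.
 */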
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete a non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int mtu = 0;

	ASSERT_RTNL();

	if (list_empty(&br->port_list))
		mtu = ETH_DATA_LEN;
	else {
		list_for_each_entry(p, &br->port_list, list) {
			if (!mtu || p->dev->mtu < mtu)
				mtu = p->dev->mtu;
		}
	}
	return mtu;
}

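/* Clamp the bridge device's GSO limits to the smallest limits found among
 * its ports.
 */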
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute the bridge's feature set from the features of its slave devices.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging of non-Ethernet-like devices, or DSA-enabled
	 * master network devices, since the bridge layer rx_handler prevents
	 * the DSA fake ethertype handler from being invoked; we would not
	 * strip off the DSA switch tag protocol header, and the bridge layer
	 * would just return RX_HANDLER_CONSUMED, stopping RX processing for
	 * these frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device is already being bridged */
	if (br_port_exists(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err)
		goto put_back;

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err1;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	dev_set_mtu(br->dev, br_min_mtu(br));
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);
	p = NULL;	/* kobject_put frees */
err1:
	dev_set_allmulti(dev, -1);
put_back:
	dev_put(dev);
	kfree(p);
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	dev_set_mtu(br->dev, br_min_mtu(br));
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

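/* Called when a port's flags have changed: refresh the auto-port count
 * (and hence port promiscuity) and the neigh-suppress state as needed.
 */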
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}