[mirror_ovs.git] / vswitchd / bridge.c
(blob as of commit "vswitchd: Fix log messages when bond slaves are enabled or disabled.")
1 /* Copyright (c) 2008, 2009 Nicira Networks
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <config.h>
17 #include "bridge.h"
18 #include <assert.h>
19 #include <errno.h>
20 #include <arpa/inet.h>
21 #include <ctype.h>
22 #include <inttypes.h>
23 #include <net/if.h>
24 #include <openflow/openflow.h>
25 #include <signal.h>
26 #include <stdlib.h>
27 #include <strings.h>
28 #include <sys/stat.h>
29 #include <sys/socket.h>
30 #include <sys/types.h>
31 #include <unistd.h>
32 #include "bitmap.h"
33 #include "cfg.h"
34 #include "coverage.h"
35 #include "dirs.h"
36 #include "dpif.h"
37 #include "dynamic-string.h"
38 #include "flow.h"
39 #include "hash.h"
40 #include "list.h"
41 #include "mac-learning.h"
42 #include "netdev.h"
43 #include "odp-util.h"
44 #include "ofp-print.h"
45 #include "ofpbuf.h"
46 #include "packets.h"
47 #include "poll-loop.h"
48 #include "port-array.h"
49 #include "proc-net-compat.h"
50 #include "process.h"
51 #include "secchan/ofproto.h"
52 #include "socket-util.h"
53 #include "stp.h"
54 #include "svec.h"
55 #include "timeval.h"
56 #include "util.h"
57 #include "unixctl.h"
58 #include "vconn.h"
59 #include "vconn-ssl.h"
60 #include "xenserver.h"
61 #include "xtoxll.h"
62
63 #define THIS_MODULE VLM_bridge
64 #include "vlog.h"
65
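/* A single destination for a packet: the VLAN it should carry on output
 * (OFP_VLAN_NONE to send it untagged) and the datapath port to send it to. */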
66 struct dst {
67 uint16_t vlan;
68 uint16_t dp_ifidx;
69 };
70
71 extern uint64_t mgmt_id;
72
73 struct iface {
74 struct port *port; /* Containing port. */
75 size_t port_ifidx; /* Index within containing port. */
76
77 char *name; /* Host network device name. */
78 int dp_ifidx; /* Index within kernel datapath. */
79
80     uint8_t mac[ETH_ADDR_LEN];  /* Ethernet address (all zeros if unknown). */
81
82 tag_type tag; /* Tag associated with this interface. */
83 bool enabled; /* May be chosen for flows? */
84 long long delay_expires; /* Time after which 'enabled' may change. */
85 };
86
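/* Source-MAC load sharing state for a bonded port: port->bond_hash is an
 * array of (BOND_MASK + 1) of these buckets, and bond_hash() below folds
 * each source MAC into exactly one of them. */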
87 #define BOND_MASK 0xff
88 struct bond_entry {
89 int iface_idx; /* Index of assigned iface, or -1 if none. */
90 uint64_t tx_bytes; /* Count of bytes recently transmitted. */
91 tag_type iface_tag; /* Tag associated with iface_idx. */
92 };
93
94 #define MAX_MIRRORS 32
95 typedef uint32_t mirror_mask_t;
96 #define MIRROR_MASK_C(X) UINT32_C(X)
97 BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
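/* A configured port mirroring session.  Packets matched by the selection
 * criteria below are copied to 'out_port' if it is set, otherwise output on
 * VLAN 'out_vlan'. */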
98 struct mirror {
99 struct bridge *bridge;
100 size_t idx;
101 char *name;
102
103 /* Selection criteria. */
104 struct svec src_ports;
105 struct svec dst_ports;
106 int *vlans;
107 size_t n_vlans;
108
109 /* Output. */
110 struct port *out_port;
111 int out_vlan;
112 };
113
114 #define FLOOD_PORT ((struct port *) 1) /* The 'flood' output port. */
115 struct port {
116 struct bridge *bridge;
117 size_t port_idx;
118 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
119 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1. */
120 char *name;
121
122 /* An ordinary bridge port has 1 interface.
123 * A bridge port for bonding has at least 2 interfaces. */
124 struct iface **ifaces;
125 size_t n_ifaces, allocated_ifaces;
126
127 /* Bonding info. */
128 struct bond_entry *bond_hash; /* An array of (BOND_MASK + 1) elements. */
129 int active_iface; /* Ifidx on which bcasts accepted, or -1. */
130 tag_type active_iface_tag; /* Tag for bcast flows. */
131 tag_type no_ifaces_tag; /* Tag for flows when all ifaces disabled. */
132 int updelay, downdelay; /* Delay before iface goes up/down, in ms. */
133
134 /* Port mirroring info. */
135 mirror_mask_t src_mirrors; /* Mirrors triggered when packet received. */
136 mirror_mask_t dst_mirrors; /* Mirrors triggered when packet sent. */
137 bool is_mirror_output_port; /* Does port mirroring send frames here? */
138
139 /* Spanning tree info. */
140 enum stp_state stp_state; /* Always STP_FORWARDING if STP not in use. */
141 tag_type stp_state_tag; /* Tag for STP state change. */
142 };
143
144 #define DP_MAX_PORTS 255
145 struct bridge {
146 struct list node; /* Node in global list of bridges. */
147 char *name; /* User-specified arbitrary name. */
148 struct mac_learning *ml; /* MAC learning table, or null not to learn. */
149 bool sent_config_request; /* Successfully sent config request? */
150 uint8_t default_ea[ETH_ADDR_LEN]; /* Default MAC. */
151
152 /* Support for remote controllers. */
153 char *controller; /* NULL if there is no remote controller;
154 * "discover" to do controller discovery;
155 * otherwise a vconn name. */
156
157 /* OpenFlow switch processing. */
158 struct ofproto *ofproto; /* OpenFlow switch. */
159
160 /* Kernel datapath information. */
161 struct dpif dpif; /* Kernel datapath. */
162 struct port_array ifaces; /* Indexed by kernel datapath port number. */
163
164 /* Bridge ports. */
165 struct port **ports;
166 size_t n_ports, allocated_ports;
167
168 /* Bonding. */
169 bool has_bonded_ports;
170 long long int bond_next_rebalance;
171
172 /* Flow tracking. */
173 bool flush;
174
175 /* Flow statistics gathering. */
176 time_t next_stats_request;
177
178 /* Port mirroring. */
179 struct mirror *mirrors[MAX_MIRRORS];
180
181 /* Spanning tree. */
182 struct stp *stp;
183 long long int stp_last_tick;
184 };
185
186 /* List of all bridges. */
187 static struct list all_bridges = LIST_INITIALIZER(&all_bridges);
188
189 /* Maximum number of datapaths. */
190 enum { DP_MAX = 256 };
191
192 static struct bridge *bridge_create(const char *name);
193 static void bridge_destroy(struct bridge *);
194 static struct bridge *bridge_lookup(const char *name);
195 static int bridge_run_one(struct bridge *);
196 static void bridge_reconfigure_one(struct bridge *);
197 static void bridge_reconfigure_controller(struct bridge *);
198 static void bridge_get_all_ifaces(const struct bridge *, struct svec *ifaces);
199 static void bridge_fetch_dp_ifaces(struct bridge *);
200 static void bridge_flush(struct bridge *);
201 static void bridge_pick_local_hw_addr(struct bridge *,
202 uint8_t ea[ETH_ADDR_LEN],
203 const char **devname);
204 static uint64_t bridge_pick_datapath_id(struct bridge *,
205 const uint8_t bridge_ea[ETH_ADDR_LEN],
206 const char *devname);
207 static uint64_t dpid_from_hash(const void *, size_t nbytes);
208
209 static void bond_init(void);
210 static void bond_run(struct bridge *);
211 static void bond_wait(struct bridge *);
212 static void bond_rebalance_port(struct port *);
213 static void bond_send_learning_packets(struct port *);
214
215 static void port_create(struct bridge *, const char *name);
216 static void port_reconfigure(struct port *);
217 static void port_destroy(struct port *);
218 static struct port *port_lookup(const struct bridge *, const char *name);
219 static struct iface *port_lookup_iface(const struct port *, const char *name);
220 static struct port *port_from_dp_ifidx(const struct bridge *,
221 uint16_t dp_ifidx);
222 static void port_update_bond_compat(struct port *);
223 static void port_update_vlan_compat(struct port *);
224
225 static void mirror_create(struct bridge *, const char *name);
226 static void mirror_destroy(struct mirror *);
227 static void mirror_reconfigure(struct bridge *);
228 static void mirror_reconfigure_one(struct mirror *);
229 static bool vlan_is_mirrored(const struct mirror *, int vlan);
230
231 static void brstp_reconfigure(struct bridge *);
232 static void brstp_adjust_timers(struct bridge *);
233 static void brstp_run(struct bridge *);
234 static void brstp_wait(struct bridge *);
235
236 static void iface_create(struct port *, const char *name);
237 static void iface_destroy(struct iface *);
238 static struct iface *iface_lookup(const struct bridge *, const char *name);
239 static struct iface *iface_from_dp_ifidx(const struct bridge *,
240 uint16_t dp_ifidx);
241
242 /* Hooks into ofproto processing. */
243 static struct ofhooks bridge_ofhooks;
244 \f
245 /* Public functions. */
246
247 /* Adds the name of each interface used by a bridge, including internal
248  * ports but excluding each bridge's local port, to 'svec'. */
249 void
250 bridge_get_ifaces(struct svec *svec)
251 {
252 struct bridge *br, *next;
253 size_t i, j;
254
255 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
256 for (i = 0; i < br->n_ports; i++) {
257 struct port *port = br->ports[i];
258
259 for (j = 0; j < port->n_ifaces; j++) {
260 struct iface *iface = port->ifaces[j];
261 if (iface->dp_ifidx < 0) {
262 VLOG_ERR("%s interface not in dp%u, ignoring",
263 iface->name, dpif_id(&br->dpif));
264 } else {
265 if (iface->dp_ifidx != ODPP_LOCAL) {
266 svec_add(svec, iface->name);
267 }
268 }
269 }
270 }
271 }
272 }
273
274 /* The caller must already have called cfg_read(). */
275 void
276 bridge_init(void)
277 {
278 int retval;
279 int i;
280
281 bond_init();
282
283 for (i = 0; i < DP_MAX; i++) {
284 struct dpif dpif;
285 char devname[16];
286
287 sprintf(devname, "dp%d", i);
288 retval = dpif_open(devname, &dpif);
289 if (!retval) {
290 char dpif_name[IF_NAMESIZE];
291 if (dpif_get_name(&dpif, dpif_name, sizeof dpif_name)
292 || !cfg_has("bridge.%s.port", dpif_name)) {
293 dpif_delete(&dpif);
294 }
295 dpif_close(&dpif);
296 } else if (retval != ENODEV) {
297 VLOG_ERR("failed to delete datapath dp%d: %s",
298 i, strerror(retval));
299 }
300 }
301
302 bridge_reconfigure();
303 }
304
305 #ifdef HAVE_OPENSSL
306 static bool
307 config_string_change(const char *key, char **valuep)
308 {
309 const char *value = cfg_get_string(0, "%s", key);
310 if (value && (!*valuep || strcmp(value, *valuep))) {
311 free(*valuep);
312 *valuep = xstrdup(value);
313 return true;
314 } else {
315 return false;
316 }
317 }
318
319 static void
320 bridge_configure_ssl(void)
321 {
322 /* XXX SSL should be configurable on a per-bridge basis.
323 * XXX should be possible to de-configure SSL. */
324 static char *private_key_file;
325 static char *certificate_file;
326 static char *cacert_file;
327 struct stat s;
328
329 if (config_string_change("ssl.private-key", &private_key_file)) {
330 vconn_ssl_set_private_key_file(private_key_file);
331 }
332
333 if (config_string_change("ssl.certificate", &certificate_file)) {
334 vconn_ssl_set_certificate_file(certificate_file);
335 }
336
337         /* We assume that if the CA cert file has been removed, even if the
338          * filename hasn't changed, we want to move back into bootstrapping
339          * mode.  This opens a small security hole, because
340 * the old certificate will still be trusted until vSwitch is
341 * restarted. We may want to address this in vconn's SSL library. */
342 if (config_string_change("ssl.ca-cert", &cacert_file)
343 || (stat(cacert_file, &s) && errno == ENOENT)) {
344 vconn_ssl_set_ca_cert_file(cacert_file,
345 cfg_get_bool(0, "ssl.bootstrap-ca-cert"));
346 }
347 }
348 #endif
349
350 void
351 bridge_reconfigure(void)
352 {
353 struct svec old_br, new_br, raw_new_br;
354 struct bridge *br, *next;
355 size_t i, j;
356
357 COVERAGE_INC(bridge_reconfigure);
358
359 /* Collect old bridges. */
360 svec_init(&old_br);
361 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
362 svec_add(&old_br, br->name);
363 }
364
365 /* Collect new bridges. */
366 svec_init(&raw_new_br);
367 cfg_get_subsections(&raw_new_br, "bridge");
368 svec_init(&new_br);
369 for (i = 0; i < raw_new_br.n; i++) {
370 const char *name = raw_new_br.names[i];
371 if ((!strncmp(name, "dp", 2) && isdigit(name[2])) ||
372 (!strncmp(name, "nl:", 3) && isdigit(name[3]))) {
373 VLOG_ERR("%s is not a valid bridge name (bridges may not be "
374 "named \"dp\" or \"nl:\" followed by a digit)", name);
375 } else {
376 svec_add(&new_br, name);
377 }
378 }
379 svec_destroy(&raw_new_br);
380
381 /* Get rid of deleted bridges and add new bridges. */
382 svec_sort(&old_br);
383 svec_sort(&new_br);
384 assert(svec_is_unique(&old_br));
385 assert(svec_is_unique(&new_br));
386 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
387 if (!svec_contains(&new_br, br->name)) {
388 bridge_destroy(br);
389 }
390 }
391 for (i = 0; i < new_br.n; i++) {
392 const char *name = new_br.names[i];
393 if (!svec_contains(&old_br, name)) {
394 bridge_create(name);
395 }
396 }
397 svec_destroy(&old_br);
398 svec_destroy(&new_br);
399
400 #ifdef HAVE_OPENSSL
401 /* Configure SSL. */
402 bridge_configure_ssl();
403 #endif
404
405 /* Reconfigure all bridges. */
406 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
407 bridge_reconfigure_one(br);
408 }
409
410 /* Add and delete ports on all datapaths.
411 *
412 * The kernel will reject any attempt to add a given port to a datapath if
413 * that port already belongs to a different datapath, so we must do all
414 * port deletions before any port additions. */
415 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
416 struct odp_port *dpif_ports;
417 size_t n_dpif_ports;
418 struct svec want_ifaces;
419
420 dpif_port_list(&br->dpif, &dpif_ports, &n_dpif_ports);
421 bridge_get_all_ifaces(br, &want_ifaces);
422 for (i = 0; i < n_dpif_ports; i++) {
423 const struct odp_port *p = &dpif_ports[i];
424 if (!svec_contains(&want_ifaces, p->devname)
425 && strcmp(p->devname, br->name)) {
426 int retval = dpif_port_del(&br->dpif, p->port);
427 if (retval) {
428 VLOG_ERR("failed to remove %s interface from dp%u: %s",
429 p->devname, dpif_id(&br->dpif), strerror(retval));
430 }
431 }
432 }
433 svec_destroy(&want_ifaces);
434 free(dpif_ports);
435 }
436 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
437 struct odp_port *dpif_ports;
438 size_t n_dpif_ports;
439 struct svec cur_ifaces, want_ifaces, add_ifaces;
440 int next_port_no;
441
442 dpif_port_list(&br->dpif, &dpif_ports, &n_dpif_ports);
443 svec_init(&cur_ifaces);
444 for (i = 0; i < n_dpif_ports; i++) {
445 svec_add(&cur_ifaces, dpif_ports[i].devname);
446 }
447 free(dpif_ports);
448 svec_sort_unique(&cur_ifaces);
449 bridge_get_all_ifaces(br, &want_ifaces);
450 svec_diff(&want_ifaces, &cur_ifaces, &add_ifaces, NULL, NULL);
451
452 next_port_no = 1;
453 for (i = 0; i < add_ifaces.n; i++) {
454 const char *if_name = add_ifaces.names[i];
455 for (;;) {
456 int internal = cfg_get_bool(0, "iface.%s.internal", if_name);
457 int error = dpif_port_add(&br->dpif, if_name, next_port_no++,
458 internal ? ODP_PORT_INTERNAL : 0);
459 if (error != EEXIST) {
460 if (next_port_no >= 256) {
461 VLOG_ERR("ran out of valid port numbers on dp%u",
462 dpif_id(&br->dpif));
463 goto out;
464 }
465 if (error) {
466 VLOG_ERR("failed to add %s interface to dp%u: %s",
467 if_name, dpif_id(&br->dpif), strerror(error));
468 }
469 break;
470 }
471 }
472 }
473 out:
474 svec_destroy(&cur_ifaces);
475 svec_destroy(&want_ifaces);
476 svec_destroy(&add_ifaces);
477 }
478 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
479 uint8_t ea[8];
480 uint64_t dpid;
481 struct iface *local_iface = NULL;
482 const char *devname;
483 uint8_t engine_type = br->dpif.minor;
484 uint8_t engine_id = br->dpif.minor;
485 bool add_id_to_iface = false;
486 struct svec nf_hosts;
487
488 bridge_fetch_dp_ifaces(br);
489 for (i = 0; i < br->n_ports; ) {
490 struct port *port = br->ports[i];
491
492 for (j = 0; j < port->n_ifaces; ) {
493 struct iface *iface = port->ifaces[j];
494 if (iface->dp_ifidx < 0) {
495 VLOG_ERR("%s interface not in dp%u, dropping",
496 iface->name, dpif_id(&br->dpif));
497 iface_destroy(iface);
498 } else {
499 if (iface->dp_ifidx == ODPP_LOCAL) {
500 local_iface = iface;
501 }
502 VLOG_DBG("dp%u has interface %s on port %d",
503 dpif_id(&br->dpif), iface->name, iface->dp_ifidx);
504 j++;
505 }
506 }
507 if (!port->n_ifaces) {
508 VLOG_ERR("%s port has no interfaces, dropping", port->name);
509 port_destroy(port);
510 continue;
511 }
512 i++;
513 }
514
515 /* Pick local port hardware address, datapath ID. */
516 bridge_pick_local_hw_addr(br, ea, &devname);
517 if (local_iface) {
518 int error = netdev_nodev_set_etheraddr(local_iface->name, ea);
519 if (error) {
520 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
521 VLOG_ERR_RL(&rl, "bridge %s: failed to set bridge "
522 "Ethernet address: %s",
523 br->name, strerror(error));
524 }
525 }
526
527 dpid = bridge_pick_datapath_id(br, ea, devname);
528 ofproto_set_datapath_id(br->ofproto, dpid);
529
530 /* Set NetFlow configuration on this bridge. */
531 if (cfg_has("netflow.%s.engine-type", br->name)) {
532 engine_type = cfg_get_int(0, "netflow.%s.engine-type",
533 br->name);
534 }
535 if (cfg_has("netflow.%s.engine-id", br->name)) {
536 engine_id = cfg_get_int(0, "netflow.%s.engine-id", br->name);
537 }
538 if (cfg_has("netflow.%s.add-id-to-iface", br->name)) {
539 add_id_to_iface = cfg_get_bool(0, "netflow.%s.add-id-to-iface",
540 br->name);
541 }
542 if (add_id_to_iface && engine_id > 0x7f) {
543 VLOG_WARN("bridge %s: netflow port mangling may conflict with "
544 "another vswitch, choose an engine id less than 128",
545 br->name);
546 }
547 if (add_id_to_iface && br->n_ports > 0x1ff) {
548 VLOG_WARN("bridge %s: netflow port mangling will conflict with "
549 "another port when 512 or more ports are used",
550 br->name);
551 }
552 svec_init(&nf_hosts);
553 cfg_get_all_keys(&nf_hosts, "netflow.%s.host", br->name);
554 if (ofproto_set_netflow(br->ofproto, &nf_hosts, engine_type,
555 engine_id, add_id_to_iface)) {
556 VLOG_ERR("bridge %s: problem setting netflow collectors",
557 br->name);
558 }
559
560 /* Update the controller and related settings. It would be more
561 * straightforward to call this from bridge_reconfigure_one(), but we
562 * can't do it there for two reasons. First, and most importantly, at
563 * that point we don't know the dp_ifidx of any interfaces that have
564 * been added to the bridge (because we haven't actually added them to
565 * the datapath). Second, at that point we haven't set the datapath ID
566 * yet; when a controller is configured, resetting the datapath ID will
567 * immediately disconnect from the controller, so it's better to set
568 * the datapath ID before the controller. */
569 bridge_reconfigure_controller(br);
570 }
571 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
572 for (i = 0; i < br->n_ports; i++) {
573 struct port *port = br->ports[i];
574 port_update_vlan_compat(port);
575 }
576 }
577 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
578 brstp_reconfigure(br);
579 }
580 }
581
582 static void
583 bridge_pick_local_hw_addr(struct bridge *br, uint8_t ea[ETH_ADDR_LEN],
584 const char **devname)
585 {
586 uint64_t requested_ea;
587 size_t i, j;
588 int error;
589
590 *devname = NULL;
591
592 /* Did the user request a particular MAC? */
593 requested_ea = cfg_get_mac(0, "bridge.%s.mac", br->name);
594 if (requested_ea) {
595 eth_addr_from_uint64(requested_ea, ea);
596 if (eth_addr_is_multicast(ea)) {
597 VLOG_ERR("bridge %s: cannot set MAC address to multicast "
598 "address "ETH_ADDR_FMT, br->name, ETH_ADDR_ARGS(ea));
599 } else if (eth_addr_is_zero(ea)) {
600 VLOG_ERR("bridge %s: cannot set MAC address to zero", br->name);
601 } else {
602 return;
603 }
604 }
605
606 /* Otherwise choose the minimum MAC address among all of the interfaces.
607 * (Xen uses FE:FF:FF:FF:FF:FF for virtual interfaces so this will get the
608 * MAC of the physical interface in such an environment.) */
609     memset(ea, 0xff, ETH_ADDR_LEN);  /* Not "sizeof ea": 'ea' decays to a pointer here. */
610 for (i = 0; i < br->n_ports; i++) {
611 struct port *port = br->ports[i];
612 if (port->is_mirror_output_port) {
613 continue;
614 }
615 for (j = 0; j < port->n_ifaces; j++) {
616 struct iface *iface = port->ifaces[j];
617 uint8_t iface_ea[ETH_ADDR_LEN];
618 if (iface->dp_ifidx == ODPP_LOCAL
619 || cfg_get_bool(0, "iface.%s.internal", iface->name)) {
620 continue;
621 }
622 error = netdev_nodev_get_etheraddr(iface->name, iface_ea);
623 if (!error) {
624 if (!eth_addr_is_multicast(iface_ea) &&
625 !eth_addr_is_reserved(iface_ea) &&
626 !eth_addr_is_zero(iface_ea) &&
627 memcmp(iface_ea, ea, ETH_ADDR_LEN) < 0) {
628 memcpy(ea, iface_ea, ETH_ADDR_LEN);
629 *devname = iface->name;
630 }
631 } else {
632 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
633 VLOG_ERR_RL(&rl, "failed to obtain Ethernet address of %s: %s",
634 iface->name, strerror(error));
635 }
636 }
637 }
638 if (eth_addr_is_multicast(ea) || eth_addr_is_vif(ea)) {
639 memcpy(ea, br->default_ea, ETH_ADDR_LEN);
640 *devname = NULL;
641 VLOG_WARN("bridge %s: using default bridge Ethernet "
642 "address "ETH_ADDR_FMT, br->name, ETH_ADDR_ARGS(ea));
643 } else {
644 VLOG_DBG("bridge %s: using bridge Ethernet address "ETH_ADDR_FMT,
645 br->name, ETH_ADDR_ARGS(ea));
646 }
647 }
648
649 /* Chooses and returns the datapath ID for bridge 'br' given that the bridge
650 * Ethernet address is 'bridge_ea'. If 'bridge_ea' is the Ethernet address of
651 * a network device, then that network device's name must be passed in as
652 * 'devname'; if 'bridge_ea' was derived some other way, then 'devname' must be
653 * passed in as a null pointer. */
654 static uint64_t
655 bridge_pick_datapath_id(struct bridge *br,
656 const uint8_t bridge_ea[ETH_ADDR_LEN],
657 const char *devname)
658 {
659 /*
660 * The procedure for choosing a bridge MAC address will, in the most
661 * ordinary case, also choose a unique MAC that we can use as a datapath
662 * ID. In some special cases, though, multiple bridges will end up with
663 * the same MAC address. This is OK for the bridges, but it will confuse
664 * the OpenFlow controller, because each datapath needs a unique datapath
665 * ID.
666 *
667 * Datapath IDs must be unique. It is also very desirable that they be
668 * stable from one run to the next, so that policy set on a datapath
669 * "sticks".
670 */
671 uint64_t dpid;
672
673 dpid = cfg_get_dpid(0, "bridge.%s.datapath-id", br->name);
674 if (dpid) {
675 return dpid;
676 }
677
678 if (devname) {
679 int vlan;
680 if (!netdev_get_vlan_vid(devname, &vlan)) {
681 /*
682 * A bridge whose MAC address is taken from a VLAN network device
683 * (that is, a network device created with vconfig(8) or similar
684 * tool) will have the same MAC address as a bridge on the VLAN
685 * device's physical network device.
686 *
687 * Handle this case by hashing the physical network device MAC
688 * along with the VLAN identifier.
689 */
690 uint8_t buf[ETH_ADDR_LEN + 2];
691 memcpy(buf, bridge_ea, ETH_ADDR_LEN);
692 buf[ETH_ADDR_LEN] = vlan >> 8;
693 buf[ETH_ADDR_LEN + 1] = vlan;
694 return dpid_from_hash(buf, sizeof buf);
695 } else {
696 /*
697 * Assume that this bridge's MAC address is unique, since it
698 * doesn't fit any of the cases we handle specially.
699 */
700 }
701 } else {
702 /*
703 * A purely internal bridge, that is, one that has no non-virtual
704 * network devices on it at all, is more difficult because it has no
705 * natural unique identifier at all.
706 *
707 * When the host is a XenServer, we handle this case by hashing the
708 * host's UUID with the name of the bridge. Names of bridges are
709 * persistent across XenServer reboots, although they can be reused if
710 * an internal network is destroyed and then a new one is later
711 * created, so this is fairly effective.
712 *
713 * When the host is not a XenServer, we punt by using a random MAC
714 * address on each run.
715 */
716 const char *host_uuid = xenserver_get_host_uuid();
717 if (host_uuid) {
718 char *combined = xasprintf("%s,%s", host_uuid, br->name);
719 dpid = dpid_from_hash(combined, strlen(combined));
720 free(combined);
721 return dpid;
722 }
723 }
724
725 return eth_addr_to_uint64(bridge_ea);
726 }
727
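/* Derives a stable datapath ID from arbitrary bytes: SHA-1 hashes 'n' bytes
 * of 'data', runs the leading ETH_ADDR_LEN bytes of the digest through
 * eth_addr_mark_random(), and packs them into a 64-bit ID with
 * eth_addr_to_uint64(). */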
728 static uint64_t
729 dpid_from_hash(const void *data, size_t n)
730 {
731 uint8_t hash[SHA1_DIGEST_SIZE];
732
733 BUILD_ASSERT_DECL(sizeof hash >= ETH_ADDR_LEN);
734 sha1_bytes(data, n, hash);
735 eth_addr_mark_random(hash);
736 return eth_addr_to_uint64(hash);
737 }
738
739 int
740 bridge_run(void)
741 {
742 struct bridge *br, *next;
743 int retval;
744
745 retval = 0;
746 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
747 int error = bridge_run_one(br);
748 if (error) {
749 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
750 VLOG_ERR_RL(&rl, "bridge %s: datapath was destroyed externally, "
751 "forcing reconfiguration", br->name);
752 if (!retval) {
753 retval = error;
754 }
755 }
756 }
757 return retval;
758 }
759
760 void
761 bridge_wait(void)
762 {
763 struct bridge *br;
764
765 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
766 ofproto_wait(br->ofproto);
767 if (br->controller) {
768 continue;
769 }
770
771 if (br->ml) {
772 mac_learning_wait(br->ml);
773 }
774 bond_wait(br);
775 brstp_wait(br);
776 }
777 }
778
779 /* Forces 'br' to revalidate all of its flows. This is appropriate when 'br''s
780 * configuration changes. */
781 static void
782 bridge_flush(struct bridge *br)
783 {
784 COVERAGE_INC(bridge_flush);
785 br->flush = true;
786 if (br->ml) {
787 mac_learning_flush(br->ml);
788 }
789 }
790 \f
791 /* Bridge reconfiguration functions. */
792
793 static struct bridge *
794 bridge_create(const char *name)
795 {
796 struct bridge *br;
797 int error;
798
799 assert(!bridge_lookup(name));
800 br = xcalloc(1, sizeof *br);
801
802 error = dpif_create(name, &br->dpif);
803 if (error == EEXIST) {
804 error = dpif_open(name, &br->dpif);
805 if (error) {
806 VLOG_ERR("datapath %s already exists but cannot be opened: %s",
807 name, strerror(error));
808 free(br);
809 return NULL;
810 }
811 dpif_flow_flush(&br->dpif);
812 } else if (error) {
813 VLOG_ERR("failed to create datapath %s: %s", name, strerror(error));
814 free(br);
815 return NULL;
816 }
817
818 error = ofproto_create(name, &bridge_ofhooks, br, &br->ofproto);
819 if (error) {
820 VLOG_ERR("failed to create switch %s: %s", name, strerror(error));
821 dpif_delete(&br->dpif);
822 dpif_close(&br->dpif);
823 free(br);
824 return NULL;
825 }
826
827 br->name = xstrdup(name);
828 br->ml = mac_learning_create();
829 br->sent_config_request = false;
830 eth_addr_random(br->default_ea);
831
832 port_array_init(&br->ifaces);
833
834 br->flush = false;
835 br->bond_next_rebalance = time_msec() + 10000;
836
837 list_push_back(&all_bridges, &br->node);
838
839 VLOG_INFO("created bridge %s on dp%u", br->name, dpif_id(&br->dpif));
840
841 return br;
842 }
843
844 static void
845 bridge_destroy(struct bridge *br)
846 {
847 if (br) {
848 int error;
849
850 while (br->n_ports > 0) {
851 port_destroy(br->ports[br->n_ports - 1]);
852 }
853 list_remove(&br->node);
854 error = dpif_delete(&br->dpif);
855 if (error && error != ENOENT) {
856 VLOG_ERR("failed to delete dp%u: %s",
857 dpif_id(&br->dpif), strerror(error));
858 }
859 dpif_close(&br->dpif);
860 ofproto_destroy(br->ofproto);
861 free(br->controller);
862 mac_learning_destroy(br->ml);
863 port_array_destroy(&br->ifaces);
864 free(br->ports);
865 free(br->name);
866 free(br);
867 }
868 }
869
870 static struct bridge *
871 bridge_lookup(const char *name)
872 {
873 struct bridge *br;
874
875 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
876 if (!strcmp(br->name, name)) {
877 return br;
878 }
879 }
880 return NULL;
881 }
882
883 bool
884 bridge_exists(const char *name)
885 {
886 return bridge_lookup(name) ? true : false;
887 }
888
889 uint64_t
890 bridge_get_datapathid(const char *name)
891 {
892 struct bridge *br = bridge_lookup(name);
893 return br ? ofproto_get_datapath_id(br->ofproto) : 0;
894 }
895
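/* Performs one round of periodic processing for 'br': ofproto, MAC learning,
 * bonding, and STP.  Returns a nonzero ofproto error, which bridge_run()
 * treats as the datapath having been destroyed externally. */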
896 static int
897 bridge_run_one(struct bridge *br)
898 {
899 int error;
900
901 error = ofproto_run1(br->ofproto);
902 if (error) {
903 return error;
904 }
905
906 if (br->ml) {
907 mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto));
908 }
909 bond_run(br);
910 brstp_run(br);
911
912 error = ofproto_run2(br->ofproto, br->flush);
913 br->flush = false;
914
915 return error;
916 }
917
918 static const char *
919 bridge_get_controller(const struct bridge *br)
920 {
921 const char *controller;
922
923 controller = cfg_get_string(0, "bridge.%s.controller", br->name);
924 if (!controller) {
925 controller = cfg_get_string(0, "mgmt.controller");
926 }
927 return controller && controller[0] ? controller : NULL;
928 }
929
930 static void
931 bridge_reconfigure_one(struct bridge *br)
932 {
933 struct svec old_ports, new_ports, ifaces;
934 struct svec listeners, old_listeners;
935 struct svec snoops, old_snoops;
936 size_t i, j;
937
938 /* Collect old ports. */
939 svec_init(&old_ports);
940 for (i = 0; i < br->n_ports; i++) {
941 svec_add(&old_ports, br->ports[i]->name);
942 }
943 svec_sort(&old_ports);
944 assert(svec_is_unique(&old_ports));
945
946 /* Collect new ports. */
947 svec_init(&new_ports);
948 cfg_get_all_keys(&new_ports, "bridge.%s.port", br->name);
949 svec_sort(&new_ports);
950 if (bridge_get_controller(br) && !svec_contains(&new_ports, br->name)) {
951 svec_add(&new_ports, br->name);
952 svec_sort(&new_ports);
953 }
954 if (!svec_is_unique(&new_ports)) {
955 VLOG_WARN("bridge %s: %s specified twice as bridge port",
956 br->name, svec_get_duplicate(&new_ports));
957 svec_unique(&new_ports);
958 }
959
960 ofproto_set_mgmt_id(br->ofproto, mgmt_id);
961
962 /* Get rid of deleted ports and add new ports. */
963 for (i = 0; i < br->n_ports; ) {
964 struct port *port = br->ports[i];
965 if (!svec_contains(&new_ports, port->name)) {
966 port_destroy(port);
967 } else {
968 i++;
969 }
970 }
971 for (i = 0; i < new_ports.n; i++) {
972 const char *name = new_ports.names[i];
973 if (!svec_contains(&old_ports, name)) {
974 port_create(br, name);
975 }
976 }
977 svec_destroy(&old_ports);
978 svec_destroy(&new_ports);
979
980 /* Reconfigure all ports. */
981 for (i = 0; i < br->n_ports; i++) {
982 port_reconfigure(br->ports[i]);
983 }
984
985 /* Check and delete duplicate interfaces. */
986 svec_init(&ifaces);
987 for (i = 0; i < br->n_ports; ) {
988 struct port *port = br->ports[i];
989 for (j = 0; j < port->n_ifaces; ) {
990 struct iface *iface = port->ifaces[j];
991 if (svec_contains(&ifaces, iface->name)) {
992 VLOG_ERR("bridge %s: %s interface is on multiple ports, "
993 "removing from %s",
994 br->name, iface->name, port->name);
995 iface_destroy(iface);
996 } else {
997 svec_add(&ifaces, iface->name);
998 svec_sort(&ifaces);
999 j++;
1000 }
1001 }
1002 if (!port->n_ifaces) {
1003 VLOG_ERR("%s port has no interfaces, dropping", port->name);
1004 port_destroy(port);
1005 } else {
1006 i++;
1007 }
1008 }
1009 svec_destroy(&ifaces);
1010
1011 /* Delete all flows if we're switching from connected to standalone or vice
1012 * versa. (XXX Should we delete all flows if we are switching from one
1013 * controller to another?) */
1014
1015 /* Configure OpenFlow management listeners. */
1016 svec_init(&listeners);
1017 cfg_get_all_strings(&listeners, "bridge.%s.openflow.listeners", br->name);
1018 if (!listeners.n) {
1019 svec_add_nocopy(&listeners, xasprintf("punix:%s/%s.mgmt",
1020 ovs_rundir, br->name));
1021 } else if (listeners.n == 1 && !strcmp(listeners.names[0], "none")) {
1022 svec_clear(&listeners);
1023 }
1024 svec_sort_unique(&listeners);
1025
1026 svec_init(&old_listeners);
1027 ofproto_get_listeners(br->ofproto, &old_listeners);
1028 svec_sort_unique(&old_listeners);
1029
1030 if (!svec_equal(&listeners, &old_listeners)) {
1031 ofproto_set_listeners(br->ofproto, &listeners);
1032 }
1033 svec_destroy(&listeners);
1034 svec_destroy(&old_listeners);
1035
1036 /* Configure OpenFlow controller connection snooping. */
1037 svec_init(&snoops);
1038 cfg_get_all_strings(&snoops, "bridge.%s.openflow.snoops", br->name);
1039 if (!snoops.n) {
1040 svec_add_nocopy(&snoops, xasprintf("punix:%s/%s.snoop",
1041 ovs_rundir, br->name));
1042 } else if (snoops.n == 1 && !strcmp(snoops.names[0], "none")) {
1043 svec_clear(&snoops);
1044 }
1045 svec_sort_unique(&snoops);
1046
1047 svec_init(&old_snoops);
1048 ofproto_get_snoops(br->ofproto, &old_snoops);
1049 svec_sort_unique(&old_snoops);
1050
1051 if (!svec_equal(&snoops, &old_snoops)) {
1052 ofproto_set_snoops(br->ofproto, &snoops);
1053 }
1054 svec_destroy(&snoops);
1055 svec_destroy(&old_snoops);
1056
1057 mirror_reconfigure(br);
1058 }
1059
1060 static void
1061 bridge_reconfigure_controller(struct bridge *br)
1062 {
1063 char *pfx = xasprintf("bridge.%s.controller", br->name);
1064 const char *controller;
1065
1066 controller = bridge_get_controller(br);
1067 if ((br->controller != NULL) != (controller != NULL)) {
1068 ofproto_flush_flows(br->ofproto);
1069 }
1070 free(br->controller);
1071 br->controller = controller ? xstrdup(controller) : NULL;
1072
1073 if (controller) {
1074 const char *fail_mode;
1075 int max_backoff, probe;
1076 int rate_limit, burst_limit;
1077
1078 if (!strcmp(controller, "discover")) {
1079 bool update_resolv_conf = true;
1080
1081 if (cfg_has("%s.update-resolv.conf", pfx)) {
1082 update_resolv_conf = cfg_get_bool(0, "%s.update-resolv.conf",
1083 pfx);
1084 }
1085 ofproto_set_discovery(br->ofproto, true,
1086 cfg_get_string(0, "%s.accept-regex", pfx),
1087 update_resolv_conf);
1088 } else {
1089 struct netdev *netdev;
1090 bool in_band;
1091 int error;
1092
1093 in_band = (!cfg_is_valid(CFG_BOOL | CFG_REQUIRED,
1094 "%s.in-band", pfx)
1095 || cfg_get_bool(0, "%s.in-band", pfx));
1096 ofproto_set_discovery(br->ofproto, false, NULL, NULL);
1097 ofproto_set_in_band(br->ofproto, in_band);
1098
1099 error = netdev_open(br->name, NETDEV_ETH_TYPE_NONE, &netdev);
1100 if (!error) {
1101 if (cfg_is_valid(CFG_IP | CFG_REQUIRED, "%s.ip", pfx)) {
1102 struct in_addr ip, mask, gateway;
1103 ip.s_addr = cfg_get_ip(0, "%s.ip", pfx);
1104 mask.s_addr = cfg_get_ip(0, "%s.netmask", pfx);
1105 gateway.s_addr = cfg_get_ip(0, "%s.gateway", pfx);
1106
1107 netdev_turn_flags_on(netdev, NETDEV_UP, true);
1108 if (!mask.s_addr) {
1109 mask.s_addr = guess_netmask(ip.s_addr);
1110 }
1111 if (!netdev_set_in4(netdev, ip, mask)) {
1112 VLOG_INFO("bridge %s: configured IP address "IP_FMT", "
1113 "netmask "IP_FMT,
1114 br->name, IP_ARGS(&ip.s_addr),
1115 IP_ARGS(&mask.s_addr));
1116 }
1117
1118 if (gateway.s_addr) {
1119 if (!netdev_add_router(gateway)) {
1120 VLOG_INFO("bridge %s: configured gateway "IP_FMT,
1121 br->name, IP_ARGS(&gateway.s_addr));
1122 }
1123 }
1124 }
1125 netdev_close(netdev);
1126 }
1127 }
1128
1129 fail_mode = cfg_get_string(0, "%s.fail-mode", pfx);
1130 if (!fail_mode) {
1131 fail_mode = cfg_get_string(0, "mgmt.fail-mode");
1132 }
1133 ofproto_set_failure(br->ofproto,
1134 (!fail_mode
1135 || !strcmp(fail_mode, "standalone")
1136 || !strcmp(fail_mode, "open")));
1137
1138 probe = cfg_get_int(0, "%s.inactivity-probe", pfx);
1139 if (probe < 5) {
1140 probe = cfg_get_int(0, "mgmt.inactivity-probe");
1141 if (probe < 5) {
1142 probe = 15;
1143 }
1144 }
1145 ofproto_set_probe_interval(br->ofproto, probe);
1146
1147 max_backoff = cfg_get_int(0, "%s.max-backoff", pfx);
1148 if (!max_backoff) {
1149 max_backoff = cfg_get_int(0, "mgmt.max-backoff");
1150 if (!max_backoff) {
1151 max_backoff = 15;
1152 }
1153 }
1154 ofproto_set_max_backoff(br->ofproto, max_backoff);
1155
1156 rate_limit = cfg_get_int(0, "%s.rate-limit", pfx);
1157 if (!rate_limit) {
1158 rate_limit = cfg_get_int(0, "mgmt.rate-limit");
1159 }
1160 burst_limit = cfg_get_int(0, "%s.burst-limit", pfx);
1161 if (!burst_limit) {
1162 burst_limit = cfg_get_int(0, "mgmt.burst-limit");
1163 }
1164 ofproto_set_rate_limit(br->ofproto, rate_limit, burst_limit);
1165
1166 ofproto_set_stp(br->ofproto, cfg_get_bool(0, "%s.stp", pfx));
1167
1168 if (cfg_has("%s.commands.acl", pfx)) {
1169 struct svec command_acls;
1170 char *command_acl;
1171
1172 svec_init(&command_acls);
1173 cfg_get_all_strings(&command_acls, "%s.commands.acl", pfx);
1174 command_acl = svec_join(&command_acls, ",", "");
1175
1176 ofproto_set_remote_execution(br->ofproto, command_acl,
1177 cfg_get_string(0, "%s.commands.dir",
1178 pfx));
1179
1180 svec_destroy(&command_acls);
1181 free(command_acl);
1182 } else {
1183 ofproto_set_remote_execution(br->ofproto, NULL, NULL);
1184 }
1185 } else {
1186 union ofp_action action;
1187 flow_t flow;
1188
1189 /* Set up a flow that matches every packet and directs them to
1190 * OFPP_NORMAL (which goes to us). */
1191 memset(&action, 0, sizeof action);
1192 action.type = htons(OFPAT_OUTPUT);
1193 action.output.len = htons(sizeof action);
1194 action.output.port = htons(OFPP_NORMAL);
1195 memset(&flow, 0, sizeof flow);
1196 ofproto_add_flow(br->ofproto, &flow, OFPFW_ALL, 0,
1197 &action, 1, 0);
1198
1199 ofproto_set_in_band(br->ofproto, false);
1200 ofproto_set_max_backoff(br->ofproto, 1);
1201 ofproto_set_probe_interval(br->ofproto, 5);
1202 ofproto_set_failure(br->ofproto, false);
1203 ofproto_set_stp(br->ofproto, false);
1204 }
1205 free(pfx);
1206
1207 ofproto_set_controller(br->ofproto, br->controller);
1208 }
1209
1210 static void
1211 bridge_get_all_ifaces(const struct bridge *br, struct svec *ifaces)
1212 {
1213 size_t i, j;
1214
1215 svec_init(ifaces);
1216 for (i = 0; i < br->n_ports; i++) {
1217 struct port *port = br->ports[i];
1218 for (j = 0; j < port->n_ifaces; j++) {
1219 struct iface *iface = port->ifaces[j];
1220 svec_add(ifaces, iface->name);
1221 }
1222 }
1223 svec_sort(ifaces);
1224 assert(svec_is_unique(ifaces));
1225 }
1226
1227 /* For robustness, in case the administrator moves around datapath ports behind
1228 * our back, we re-check all the datapath port numbers here.
1229 *
1230 * This function will set the 'dp_ifidx' members of interfaces that have
1231 * disappeared to -1, so only call this function from a context where those
1232 * 'struct iface's will be removed from the bridge. Otherwise, the -1
1233 * 'dp_ifidx'es will cause trouble later when we try to send them to the
1234 * datapath, which doesn't support UINT16_MAX+1 ports. */
1235 static void
1236 bridge_fetch_dp_ifaces(struct bridge *br)
1237 {
1238 struct odp_port *dpif_ports;
1239 size_t n_dpif_ports;
1240 size_t i, j;
1241
1242 /* Reset all interface numbers. */
1243 for (i = 0; i < br->n_ports; i++) {
1244 struct port *port = br->ports[i];
1245 for (j = 0; j < port->n_ifaces; j++) {
1246 struct iface *iface = port->ifaces[j];
1247 iface->dp_ifidx = -1;
1248 }
1249 }
1250 port_array_clear(&br->ifaces);
1251
1252 dpif_port_list(&br->dpif, &dpif_ports, &n_dpif_ports);
1253 for (i = 0; i < n_dpif_ports; i++) {
1254 struct odp_port *p = &dpif_ports[i];
1255 struct iface *iface = iface_lookup(br, p->devname);
1256 if (iface) {
1257 if (iface->dp_ifidx >= 0) {
1258 VLOG_WARN("dp%u reported interface %s twice",
1259 dpif_id(&br->dpif), p->devname);
1260 } else if (iface_from_dp_ifidx(br, p->port)) {
1261 VLOG_WARN("dp%u reported interface %"PRIu16" twice",
1262 dpif_id(&br->dpif), p->port);
1263 } else {
1264 port_array_set(&br->ifaces, p->port, iface);
1265 iface->dp_ifidx = p->port;
1266 }
1267 }
1268 }
1269 free(dpif_ports);
1270 }
1271 \f
1272 /* Bridge packet processing functions. */
1273
1274 static int
1275 bond_hash(const uint8_t mac[ETH_ADDR_LEN])
1276 {
1277 return hash_bytes(mac, ETH_ADDR_LEN, 0) & BOND_MASK;
1278 }
1279
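/* Returns the bond_entry bucket that 'port' uses for source MAC 'mac'.
 * bond_hash() folds the 6-byte address into an index in [0, BOND_MASK], so
 * all traffic from a given source MAC shares one bucket and thus one slave. */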
1280 static struct bond_entry *
1281 lookup_bond_entry(const struct port *port, const uint8_t mac[ETH_ADDR_LEN])
1282 {
1283 return &port->bond_hash[bond_hash(mac)];
1284 }
1285
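/* Returns the index within 'port' of the first enabled slave interface, or
 * -1 if every slave is disabled. */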
1286 static int
1287 bond_choose_iface(const struct port *port)
1288 {
1289 size_t i;
1290 for (i = 0; i < port->n_ifaces; i++) {
1291 if (port->ifaces[i]->enabled) {
1292 return i;
1293 }
1294 }
1295 return -1;
1296 }
1297
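/* Chooses the slave of 'port' that should transmit a packet with Ethernet
 * source 'dl_src'.  For a single-interface port this is trivial; for a bond,
 * the source MAC's bond_entry is (re)assigned to an enabled slave if needed.
 * On success stores the datapath port in '*dp_ifidx', adds the relevant
 * revalidation tags to '*tags', and returns true; returns false if no slave
 * is enabled. */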
1298 static bool
1299 choose_output_iface(const struct port *port, const uint8_t *dl_src,
1300 uint16_t *dp_ifidx, tag_type *tags)
1301 {
1302 struct iface *iface;
1303
1304 assert(port->n_ifaces);
1305 if (port->n_ifaces == 1) {
1306 iface = port->ifaces[0];
1307 } else {
1308 struct bond_entry *e = lookup_bond_entry(port, dl_src);
1309 if (e->iface_idx < 0 || e->iface_idx >= port->n_ifaces
1310 || !port->ifaces[e->iface_idx]->enabled) {
1311 /* XXX select interface properly. The current interface selection
1312 * is only good for testing the rebalancing code. */
1313 e->iface_idx = bond_choose_iface(port);
1314 if (e->iface_idx < 0) {
1315 *tags |= port->no_ifaces_tag;
1316 return false;
1317 }
1318 e->iface_tag = tag_create_random();
1319 }
1320 *tags |= e->iface_tag;
1321 iface = port->ifaces[e->iface_idx];
1322 }
1323 *dp_ifidx = iface->dp_ifidx;
1324 *tags |= iface->tag; /* Currently only used for bonding. */
1325 return true;
1326 }
1327
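/* Handles a carrier change on bonded slave 'iface'.  The slave is not
 * toggled immediately; 'delay_expires' is set so that bond_run() enables or
 * disables it only after the carrier has stayed in its new state for the
 * port's updelay or downdelay. */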
1328 static void
1329 bond_link_status_update(struct iface *iface, bool carrier)
1330 {
1331 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1332 struct port *port = iface->port;
1333
1334 if ((carrier == iface->enabled) == (iface->delay_expires == LLONG_MAX)) {
1335 /* Nothing to do. */
1336 return;
1337 }
1338 VLOG_INFO_RL(&rl, "interface %s: carrier %s",
1339 iface->name, carrier ? "detected" : "dropped");
1340 if (carrier == iface->enabled) {
1341 iface->delay_expires = LLONG_MAX;
1342 VLOG_INFO_RL(&rl, "interface %s: will not be %s",
1343 iface->name, carrier ? "disabled" : "enabled");
1344 } else {
1345 int delay = carrier ? port->updelay : port->downdelay;
1346 iface->delay_expires = time_msec() + delay;
1347 if (delay) {
1348 VLOG_INFO_RL(&rl,
1349 "interface %s: will be %s if it stays %s for %d ms",
1350 iface->name,
1351 carrier ? "enabled" : "disabled",
1352 carrier ? "up" : "down",
1353 delay);
1354 }
1355 }
1356 }
1357
1358 static void
1359 bond_choose_active_iface(struct port *port)
1360 {
1361 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1362
1363 port->active_iface = bond_choose_iface(port);
1364 port->active_iface_tag = tag_create_random();
1365 if (port->active_iface >= 0) {
1366 VLOG_INFO_RL(&rl, "port %s: active interface is now %s",
1367 port->name, port->ifaces[port->active_iface]->name);
1368 } else {
1369 VLOG_WARN_RL(&rl, "port %s: all ports disabled, no active interface",
1370 port->name);
1371 }
1372 }
1373
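/* Enables or disables bonded slave 'iface', revalidating flows that were
 * using it and, if the active slave must change as a result, choosing a new
 * one and sending learning packets so the network relearns where the bond's
 * MACs live. */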
1374 static void
1375 bond_enable_slave(struct iface *iface, bool enable)
1376 {
1377 struct port *port = iface->port;
1378 struct bridge *br = port->bridge;
1379
1380 iface->delay_expires = LLONG_MAX;
1381 if (enable == iface->enabled) {
1382 return;
1383 }
1384
1385 iface->enabled = enable;
1386 if (!iface->enabled) {
1387 VLOG_WARN("interface %s: disabled", iface->name);
1388 ofproto_revalidate(br->ofproto, iface->tag);
1389 if (iface->port_ifidx == port->active_iface) {
1390 ofproto_revalidate(br->ofproto,
1391 port->active_iface_tag);
1392 bond_choose_active_iface(port);
1393 }
1394 bond_send_learning_packets(port);
1395 } else {
1396 VLOG_WARN("interface %s: enabled", iface->name);
1397 if (port->active_iface < 0) {
1398 ofproto_revalidate(br->ofproto, port->no_ifaces_tag);
1399 bond_choose_active_iface(port);
1400 bond_send_learning_packets(port);
1401 }
1402 iface->tag = tag_create_random();
1403 }
1404 }
1405
1406 static void
1407 bond_run(struct bridge *br)
1408 {
1409 size_t i, j;
1410
1411 for (i = 0; i < br->n_ports; i++) {
1412 struct port *port = br->ports[i];
1413 if (port->n_ifaces < 2) {
1414 continue;
1415 }
1416 for (j = 0; j < port->n_ifaces; j++) {
1417 struct iface *iface = port->ifaces[j];
1418 if (time_msec() >= iface->delay_expires) {
1419 bond_enable_slave(iface, !iface->enabled);
1420 }
1421 }
1422 }
1423 }
1424
1425 static void
1426 bond_wait(struct bridge *br)
1427 {
1428 size_t i, j;
1429
1430 for (i = 0; i < br->n_ports; i++) {
1431 struct port *port = br->ports[i];
1432 if (port->n_ifaces < 2) {
1433 continue;
1434 }
1435 for (j = 0; j < port->n_ifaces; j++) {
1436 struct iface *iface = port->ifaces[j];
1437 if (iface->delay_expires != LLONG_MAX) {
1438 poll_timer_wait(iface->delay_expires - time_msec());
1439 }
1440 }
1441 }
1442 }
1443
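/* Fills in '*p' as a destination for 'flow', received on 'in_port' and
 * destined for 'out_port': the VLAN tag the packet should carry on output
 * (OFP_VLAN_NONE for an access port) and the slave chosen by
 * choose_output_iface().  Returns false if STP state or bonding prevents
 * output on 'out_port'. */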
1444 static bool
1445 set_dst(struct dst *p, const flow_t *flow,
1446 const struct port *in_port, const struct port *out_port,
1447 tag_type *tags)
1448 {
1449 /* STP handling.
1450 *
1451 * XXX This uses too many tags: any broadcast flow will get one tag per
1452 * destination port, and thus a broadcast on a switch of any size is likely
1453 * to have all tag bits set. We should figure out a way to be smarter.
1454 *
1455 * This is OK when STP is disabled, because stp_state_tag is 0 then. */
1456 *tags |= out_port->stp_state_tag;
1457 if (!(out_port->stp_state & (STP_DISABLED | STP_FORWARDING))) {
1458 return false;
1459 }
1460
1461 p->vlan = (out_port->vlan >= 0 ? OFP_VLAN_NONE
1462 : in_port->vlan >= 0 ? in_port->vlan
1463 : ntohs(flow->dl_vlan));
1464 return choose_output_iface(out_port, flow->dl_src, &p->dp_ifidx, tags);
1465 }
1466
1467 static void
1468 swap_dst(struct dst *p, struct dst *q)
1469 {
1470 struct dst tmp = *p;
1471 *p = *q;
1472 *q = tmp;
1473 }
1474
1475 /* Moves all the dsts with vlan == 'vlan' to the front of the 'n_dsts' elements
1476  * in 'dsts'.  (This may help performance by reducing the number of VLAN changes
1477 * that we push to the datapath. We could in fact fully sort the array by
1478 * vlan, but in most cases there are at most two different vlan tags so that's
1479 * possibly overkill.) */
1480 static void
1481 partition_dsts(struct dst *dsts, size_t n_dsts, int vlan)
1482 {
1483 struct dst *first = dsts;
1484 struct dst *last = dsts + n_dsts;
1485
1486 while (first != last) {
1487 /* Invariants:
1488 * - All dsts < first have vlan == 'vlan'.
1489 * - All dsts >= last have vlan != 'vlan'.
1490 * - first < last. */
1491 while (first->vlan == vlan) {
1492 if (++first == last) {
1493 return;
1494 }
1495 }
1496
1497 /* Same invariants, plus one additional:
1498 * - first->vlan != vlan.
1499 */
1500 while (last[-1].vlan != vlan) {
1501 if (--last == first) {
1502 return;
1503 }
1504 }
1505
1506 /* Same invariants, plus one additional:
1507 * - last[-1].vlan == vlan.*/
1508 swap_dst(first++, --last);
1509 }
1510 }
1511
1512 static int
1513 mirror_mask_ffs(mirror_mask_t mask)
1514 {
1515 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
1516 return ffs(mask);
1517 }
1518
1519 static bool
1520 dst_is_duplicate(const struct dst *dsts, size_t n_dsts,
1521 const struct dst *test)
1522 {
1523 size_t i;
1524 for (i = 0; i < n_dsts; i++) {
1525 if (dsts[i].vlan == test->vlan && dsts[i].dp_ifidx == test->dp_ifidx) {
1526 return true;
1527 }
1528 }
1529 return false;
1530 }
1531
1532 static bool
1533 port_trunks_vlan(const struct port *port, uint16_t vlan)
1534 {
1535 return port->vlan < 0 && bitmap_is_set(port->trunks, vlan);
1536 }
1537
1538 static bool
1539 port_includes_vlan(const struct port *port, uint16_t vlan)
1540 {
1541 return vlan == port->vlan || port_trunks_vlan(port, vlan);
1542 }
1543
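/* Fills 'dsts[]' with every (vlan, dp_ifidx) destination for 'flow' on
 * 'vlan': the normal output, or a flood to the other non-mirror ports that
 * carry 'vlan' when 'out_port' is FLOOD_PORT, plus any outputs required by
 * port mirroring, with duplicates suppressed.  Returns the number of
 * destinations written. */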
1544 static size_t
1545 compose_dsts(const struct bridge *br, const flow_t *flow, uint16_t vlan,
1546 const struct port *in_port, const struct port *out_port,
1547 struct dst dsts[], tag_type *tags)
1548 {
1549 mirror_mask_t mirrors = in_port->src_mirrors;
1550 struct dst *dst = dsts;
1551 size_t i;
1552
1553 *tags |= in_port->stp_state_tag;
1554 if (out_port == FLOOD_PORT) {
1555 /* XXX use ODP_FLOOD if no vlans or bonding. */
1556 /* XXX even better, define each VLAN as a datapath port group */
1557 for (i = 0; i < br->n_ports; i++) {
1558 struct port *port = br->ports[i];
1559 if (port != in_port && port_includes_vlan(port, vlan)
1560 && !port->is_mirror_output_port
1561 && set_dst(dst, flow, in_port, port, tags)) {
1562 mirrors |= port->dst_mirrors;
1563 dst++;
1564 }
1565 }
1566 } else if (out_port && set_dst(dst, flow, in_port, out_port, tags)) {
1567 mirrors |= out_port->dst_mirrors;
1568 dst++;
1569 }
1570
1571 while (mirrors) {
1572 struct mirror *m = br->mirrors[mirror_mask_ffs(mirrors) - 1];
1573 if (!m->n_vlans || vlan_is_mirrored(m, vlan)) {
1574 if (m->out_port) {
1575 if (set_dst(dst, flow, in_port, m->out_port, tags)
1576 && !dst_is_duplicate(dsts, dst - dsts, dst)) {
1577 dst++;
1578 }
1579 } else {
1580 for (i = 0; i < br->n_ports; i++) {
1581 struct port *port = br->ports[i];
1582 if (port_includes_vlan(port, m->out_vlan)
1583 && set_dst(dst, flow, in_port, port, tags)
1584 && !dst_is_duplicate(dsts, dst - dsts, dst))
1585 {
1586 if (port->vlan < 0) {
1587 dst->vlan = m->out_vlan;
1588 }
1589 if (dst->dp_ifidx == flow->in_port
1590 && dst->vlan == vlan) {
1591 /* Don't send out input port on same VLAN. */
1592 continue;
1593 }
1594 dst++;
1595 }
1596 }
1597 }
1598 }
1599 mirrors &= mirrors - 1;
1600 }
1601
1602 partition_dsts(dsts, dst - dsts, ntohs(flow->dl_vlan));
1603 return dst - dsts;
1604 }
1605
1606 static void UNUSED
1607 print_dsts(const struct dst *dsts, size_t n)
1608 {
1609 for (; n--; dsts++) {
1610 printf(">p%"PRIu16, dsts->dp_ifidx);
1611 if (dsts->vlan != OFP_VLAN_NONE) {
1612 printf("v%"PRIu16, dsts->vlan);
1613 }
1614 }
1615 }
1616
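/* Converts the destination list built by compose_dsts() into datapath
 * actions, emitting a set-VLAN or strip-VLAN action only when the output
 * VLAN differs from the previous destination's (partition_dsts() orders the
 * list to minimize these changes). */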
1617 static void
1618 compose_actions(struct bridge *br, const flow_t *flow, uint16_t vlan,
1619 const struct port *in_port, const struct port *out_port,
1620 tag_type *tags, struct odp_actions *actions)
1621 {
1622 struct dst dsts[DP_MAX_PORTS * (MAX_MIRRORS + 1)];
1623 size_t n_dsts;
1624 const struct dst *p;
1625 uint16_t cur_vlan;
1626
1627 n_dsts = compose_dsts(br, flow, vlan, in_port, out_port, dsts, tags);
1628
1629 cur_vlan = ntohs(flow->dl_vlan);
1630 for (p = dsts; p < &dsts[n_dsts]; p++) {
1631 union odp_action *a;
1632 if (p->vlan != cur_vlan) {
1633 if (p->vlan == OFP_VLAN_NONE) {
1634 odp_actions_add(actions, ODPAT_STRIP_VLAN);
1635 } else {
1636 a = odp_actions_add(actions, ODPAT_SET_VLAN_VID);
1637 a->vlan_vid.vlan_vid = htons(p->vlan);
1638 }
1639 cur_vlan = p->vlan;
1640 }
1641 a = odp_actions_add(actions, ODPAT_OUTPUT);
1642 a->output.port = p->dp_ifidx;
1643 }
1644 }
1645
1646 static bool
1647 is_bcast_arp_reply(const flow_t *flow, const struct ofpbuf *packet)
1648 {
1649 struct arp_eth_header *arp = (struct arp_eth_header *) packet->data;
1650 return (flow->dl_type == htons(ETH_TYPE_ARP)
1651 && eth_addr_is_broadcast(flow->dl_dst)
1652 && packet->size >= sizeof(struct arp_eth_header)
1653 && arp->ar_op == ARP_OP_REQUEST);
1654 }
1655
1656 /* If the composed actions may be applied to any packet in the given 'flow',
1657 * returns true. Otherwise, the actions should only be applied to 'packet', or
1658 * not at all, if 'packet' was NULL. */
1659 static bool
1660 process_flow(struct bridge *br, const flow_t *flow,
1661 const struct ofpbuf *packet, struct odp_actions *actions,
1662 tag_type *tags)
1663 {
1664 struct iface *in_iface;
1665 struct port *in_port;
1666 struct port *out_port = NULL; /* By default, drop the packet/flow. */
1667 int vlan;
1668
1669 /* Find the interface and port structure for the received packet. */
1670 in_iface = iface_from_dp_ifidx(br, flow->in_port);
1671 if (!in_iface) {
1672 /* No interface? Something fishy... */
1673 if (packet != NULL) {
1674 /* Odd. A few possible reasons here:
1675 *
1676 * - We deleted an interface but there are still a few packets
1677 * queued up from it.
1678 *
1679 * - Someone externally added an interface (e.g. with "ovs-dpctl
1680 * add-if") that we don't know about.
1681 *
1682 * - Packet arrived on the local port but the local port is not
1683 * one of our bridge ports.
1684 */
1685 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1686
1687 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
1688 "interface %"PRIu16, br->name, flow->in_port);
1689 }
1690
1691 /* Return without adding any actions, to drop packets on this flow. */
1692 return true;
1693 }
1694 in_port = in_iface->port;
1695
1696 /* Figure out what VLAN this packet belongs to.
1697 *
1698 * Note that dl_vlan of 0 and of OFP_VLAN_NONE both mean that the packet
1699 * belongs to VLAN 0, so we should treat both cases identically. (In the
1700 * former case, the packet has an 802.1Q header that specifies VLAN 0,
1701 * presumably to allow a priority to be specified. In the latter case, the
1702 * packet does not have any 802.1Q header.) */
1703 vlan = ntohs(flow->dl_vlan);
1704 if (vlan == OFP_VLAN_NONE) {
1705 vlan = 0;
1706 }
1707 if (in_port->vlan >= 0) {
1708 if (vlan) {
1709 /* XXX support double tagging? */
1710 if (packet != NULL) {
1711 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1712 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
1713 "packet received on port %s configured with "
1714 "implicit VLAN %"PRIu16,
1715 br->name, ntohs(flow->dl_vlan),
1716 in_port->name, in_port->vlan);
1717 }
1718 goto done;
1719 }
1720 vlan = in_port->vlan;
1721 } else {
1722 if (!port_includes_vlan(in_port, vlan)) {
1723 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1724 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
1725 "packet received on port %s not configured for "
1726 "trunking VLAN %d",
1727 br->name, vlan, in_port->name, vlan);
1728 goto done;
1729 }
1730 }
1731
1732 /* Drop frames for ports that STP wants entirely killed (both for
1733 * forwarding and for learning). Later, after we do learning, we'll drop
1734 * the frames that STP wants to do learning but not forwarding on. */
1735 if (in_port->stp_state & (STP_LISTENING | STP_BLOCKING)) {
1736 goto done;
1737 }
1738
1739 /* Drop frames for reserved multicast addresses. */
1740 if (eth_addr_is_reserved(flow->dl_dst)) {
1741 goto done;
1742 }
1743
1744 /* Drop frames on ports reserved for mirroring. */
1745 if (in_port->is_mirror_output_port) {
1746 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1747 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port %s, "
1748 "which is reserved exclusively for mirroring",
1749 br->name, in_port->name);
1750 goto done;
1751 }
1752
1753 /* Multicast (and broadcast) packets on bonds need special attention, to
1754 * avoid receiving duplicates. */
1755 if (in_port->n_ifaces > 1 && eth_addr_is_multicast(flow->dl_dst)) {
1756 *tags |= in_port->active_iface_tag;
1757 if (in_port->active_iface != in_iface->port_ifidx) {
1758 /* Drop all multicast packets on inactive slaves. */
1759 goto done;
1760 } else {
1761 /* Drop all multicast packets for which we have learned a different
1762              * input port, because we probably sent the packet on one slave
1763 * and got it back on the active slave. Broadcast ARP replies are
1764 * an exception to this rule: the host has moved to another
1765 * switch. */
1766 int src_idx = mac_learning_lookup(br->ml, flow->dl_src, vlan);
1767 if (src_idx != -1 && src_idx != in_port->port_idx) {
1768 if (packet) {
1769 if (!is_bcast_arp_reply(flow, packet)) {
1770 goto done;
1771 }
1772 } else {
1773 /* No way to know whether it's an ARP reply, because the
1774 * flow entry doesn't include enough information and we
1775 * don't have a packet. Punt. */
1776 return false;
1777 }
1778 }
1779 }
1780 }
1781
1782 /* MAC learning. */
1783 out_port = FLOOD_PORT;
1784 if (br->ml) {
1785 int out_port_idx;
1786
1787 /* Learn source MAC (but don't try to learn from revalidation). */
1788 if (packet) {
1789 tag_type rev_tag = mac_learning_learn(br->ml, flow->dl_src,
1790 vlan, in_port->port_idx);
1791 if (rev_tag) {
1792 /* The log messages here could actually be useful in debugging,
1793 * so keep the rate limit relatively high. */
1794 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30,
1795 300);
1796 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
1797 "on port %s in VLAN %d",
1798 br->name, ETH_ADDR_ARGS(flow->dl_src),
1799 in_port->name, vlan);
1800 ofproto_revalidate(br->ofproto, rev_tag);
1801 }
1802 }
1803
1804 /* Determine output port. */
1805 out_port_idx = mac_learning_lookup_tag(br->ml, flow->dl_dst, vlan,
1806 tags);
1807 if (out_port_idx >= 0 && out_port_idx < br->n_ports) {
1808 out_port = br->ports[out_port_idx];
1809 }
1810 }
1811
1812 /* Don't send packets out their input ports. Don't forward frames that STP
1813 * wants us to discard. */
1814 if (in_port == out_port || in_port->stp_state == STP_LEARNING) {
1815 out_port = NULL;
1816 }
1817
1818 done:
1819 compose_actions(br, flow, vlan, in_port, out_port, tags, actions);
1820
1821 /*
1822 * We send out only a single packet, instead of setting up a flow, if the
1823 * packet is an ARP directed to broadcast that arrived on a bonded
1824 * interface. In such a situation ARP requests and replies must be handled
1825 * differently, but OpenFlow unfortunately can't distinguish them.
1826 */
1827 return (in_port->n_ifaces < 2
1828 || flow->dl_type != htons(ETH_TYPE_ARP)
1829 || !eth_addr_is_broadcast(flow->dl_dst));
1830 }
1831
1832 /* Careful: 'opp' is in host byte order and opp->port_no is an OFP port
1833 * number. */
1834 static void
1835 bridge_port_changed_ofhook_cb(enum ofp_port_reason reason,
1836 const struct ofp_phy_port *opp,
1837 void *br_)
1838 {
1839 struct bridge *br = br_;
1840 struct iface *iface;
1841 struct port *port;
1842
1843 iface = iface_from_dp_ifidx(br, ofp_port_to_odp_port(opp->port_no));
1844 if (!iface) {
1845 return;
1846 }
1847 port = iface->port;
1848
1849 if (reason == OFPPR_DELETE) {
1850 VLOG_WARN("bridge %s: interface %s deleted unexpectedly",
1851 br->name, iface->name);
1852 iface_destroy(iface);
1853 if (!port->n_ifaces) {
1854 VLOG_WARN("bridge %s: port %s has no interfaces, dropping",
1855 br->name, port->name);
1856 port_destroy(port);
1857 }
1858
1859 bridge_flush(br);
1860 } else {
1861 memcpy(iface->mac, opp->hw_addr, ETH_ADDR_LEN);
1862 if (port->n_ifaces > 1) {
1863 bool up = !(opp->state & OFPPS_LINK_DOWN);
1864 bond_link_status_update(iface, up);
1865 port_update_bond_compat(port);
1866 }
1867 }
1868 }
1869
1870 static bool
1871 bridge_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
1872 struct odp_actions *actions, tag_type *tags, void *br_)
1873 {
1874 struct bridge *br = br_;
1875
1876 #if 0
1877 if (flow->dl_type == htons(OFP_DL_TYPE_NOT_ETH_TYPE)
1878 && eth_addr_equals(flow->dl_dst, stp_eth_addr)) {
1879 brstp_receive(br, flow, payload);
1880 return true;
1881 }
1882 #endif
1883
1884 COVERAGE_INC(bridge_process_flow);
1885 return process_flow(br, flow, packet, actions, tags);
1886 }
1887
1888 static void
1889 bridge_account_flow_ofhook_cb(const flow_t *flow,
1890 const union odp_action *actions,
1891 size_t n_actions, unsigned long long int n_bytes,
1892 void *br_)
1893 {
1894 struct bridge *br = br_;
1895 const union odp_action *a;
1896
1897 if (!br->has_bonded_ports) {
1898 return;
1899 }
1900
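/* Charge this flow's bytes to the bond hash bucket chosen for its source MAC
 * on each bonded output port it traverses, so that bond_rebalance_port()
 * sees up-to-date per-hash loads. */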
1901 for (a = actions; a < &actions[n_actions]; a++) {
1902 if (a->type == ODPAT_OUTPUT) {
1903 struct port *port = port_from_dp_ifidx(br, a->output.port);
1904 if (port && port->n_ifaces >= 2) {
1905 struct bond_entry *e = lookup_bond_entry(port, flow->dl_src);
1906 e->tx_bytes += n_bytes;
1907 }
1908 }
1909 }
1910 }
1911
1912 static void
1913 bridge_account_checkpoint_ofhook_cb(void *br_)
1914 {
1915 struct bridge *br = br_;
1916 size_t i;
1917
1918 if (!br->has_bonded_ports) {
1919 return;
1920 }
1921
1922 /* The current ofproto implementation calls this callback at least once a
1923 * second, so this timer implementation is sufficient. */
1924 if (time_msec() < br->bond_next_rebalance) {
1925 return;
1926 }
1927 br->bond_next_rebalance = time_msec() + 10000;
1928
1929 for (i = 0; i < br->n_ports; i++) {
1930 struct port *port = br->ports[i];
1931 if (port->n_ifaces > 1) {
1932 bond_rebalance_port(port);
1933 }
1934 }
1935 }
1936
1937 static struct ofhooks bridge_ofhooks = {
1938 bridge_port_changed_ofhook_cb,
1939 bridge_normal_ofhook_cb,
1940 bridge_account_flow_ofhook_cb,
1941 bridge_account_checkpoint_ofhook_cb,
1942 };
1943 \f
1944 /* Bonding functions. */
1945
1946 /* Statistics for a single interface on a bonded port, used for load-based
1947 * bond rebalancing. */
1948 struct slave_balance {
1949 struct iface *iface; /* The interface. */
1950 uint64_t tx_bytes; /* Sum of hashes[*]->tx_bytes. */
1951
1952 /* All the "bond_entry"s that are assigned to this interface, in order of
1953 * increasing tx_bytes. */
1954 struct bond_entry **hashes;
1955 size_t n_hashes;
1956 };
1957
1958 /* Sorts pointers to pointers to bond_entries in ascending order by the
1959 * interface to which they are assigned, and within a single interface in
1960 * ascending order of bytes transmitted. */
1961 static int
1962 compare_bond_entries(const void *a_, const void *b_)
1963 {
1964 const struct bond_entry *const *ap = a_;
1965 const struct bond_entry *const *bp = b_;
1966 const struct bond_entry *a = *ap;
1967 const struct bond_entry *b = *bp;
1968 if (a->iface_idx != b->iface_idx) {
1969 return a->iface_idx > b->iface_idx ? 1 : -1;
1970 } else if (a->tx_bytes != b->tx_bytes) {
1971 return a->tx_bytes > b->tx_bytes ? 1 : -1;
1972 } else {
1973 return 0;
1974 }
1975 }
1976
1977 /* Sorts slave_balances so that enabled ports come first, and otherwise in
1978 * *descending* order by number of bytes transmitted. */
1979 static int
1980 compare_slave_balance(const void *a_, const void *b_)
1981 {
1982 const struct slave_balance *a = a_;
1983 const struct slave_balance *b = b_;
1984 if (a->iface->enabled != b->iface->enabled) {
1985 return a->iface->enabled ? -1 : 1;
1986 } else if (a->tx_bytes != b->tx_bytes) {
1987 return a->tx_bytes > b->tx_bytes ? -1 : 1;
1988 } else {
1989 return 0;
1990 }
1991 }
1992
1993 static void
1994 swap_bals(struct slave_balance *a, struct slave_balance *b)
1995 {
1996 struct slave_balance tmp = *a;
1997 *a = *b;
1998 *b = tmp;
1999 }
2000
2001 /* Restores the 'n_bals' slave_balance structures in 'bals' to sorted order
2002 * given that 'p' (and only 'p') might be in the wrong location.
2003 *
2004 * This function invalidates 'p', since it might now be in a different memory
2005 * location. */
2006 static void
2007 resort_bals(struct slave_balance *p,
2008 struct slave_balance bals[], size_t n_bals)
2009 {
2010 if (n_bals > 1) {
2011 for (; p > bals && p->tx_bytes > p[-1].tx_bytes; p--) {
2012 swap_bals(p, p - 1);
2013 }
2014 for (; p < &bals[n_bals - 1] && p->tx_bytes < p[1].tx_bytes; p++) {
2015 swap_bals(p, p + 1);
2016 }
2017 }
2018 }
2019
2020 static void
2021 log_bals(const struct slave_balance *bals, size_t n_bals, struct port *port)
2022 {
2023 if (VLOG_IS_DBG_ENABLED()) {
2024 struct ds ds = DS_EMPTY_INITIALIZER;
2025 const struct slave_balance *b;
2026
2027 for (b = bals; b < bals + n_bals; b++) {
2028 size_t i;
2029
2030 if (b > bals) {
2031 ds_put_char(&ds, ',');
2032 }
2033 ds_put_format(&ds, " %s %"PRIu64"kB",
2034 b->iface->name, b->tx_bytes / 1024);
2035
2036 if (!b->iface->enabled) {
2037 ds_put_cstr(&ds, " (disabled)");
2038 }
2039 if (b->n_hashes > 0) {
2040 ds_put_cstr(&ds, " (");
2041 for (i = 0; i < b->n_hashes; i++) {
2042 const struct bond_entry *e = b->hashes[i];
2043 if (i > 0) {
2044 ds_put_cstr(&ds, " + ");
2045 }
2046 ds_put_format(&ds, "h%td: %"PRIu64"kB",
2047 e - port->bond_hash, e->tx_bytes / 1024);
2048 }
2049 ds_put_cstr(&ds, ")");
2050 }
2051 }
2052 VLOG_DBG("bond %s:%s", port->name, ds_cstr(&ds));
2053 ds_destroy(&ds);
2054 }
2055 }
2056
2057 /* Shifts 'hash' from 'from' to 'to' within 'port'. */
2058 static void
2059 bond_shift_load(struct slave_balance *from, struct slave_balance *to,
2060 struct bond_entry *hash)
2061 {
2062 struct port *port = from->iface->port;
2063 uint64_t delta = hash->tx_bytes;
2064
2065 VLOG_INFO("bond %s: shift %"PRIu64"kB of load (with hash %td) "
2066 "from %s to %s (now carrying %"PRIu64"kB and "
2067 "%"PRIu64"kB load, respectively)",
2068 port->name, delta / 1024, hash - port->bond_hash,
2069 from->iface->name, to->iface->name,
2070 (from->tx_bytes - delta) / 1024,
2071 (to->tx_bytes + delta) / 1024);
2072
2073 /* Delete element from from->hashes.
2074 *
2075 * We don't bother to add the element to to->hashes because not only would
2076 * it require more work, but the only purpose that would serve would be to
2077 * allow that hash to be migrated to another slave in this rebalancing run,
2078 * and there is no point in doing that. */
2079 if (from->hashes[0] == hash) {
2080 from->hashes++;
2081 } else {
2082 int i;
/* Find the index of 'hash' within the from->hashes pointer array.  (Pointer
 * subtraction on the entries themselves would give an offset within
 * port->bond_hash instead, which is not the index we need here.) */
for (i = 1; from->hashes[i] != hash; i++) {
continue;
}
2083 memmove(from->hashes + i, from->hashes + i + 1,
2084 (from->n_hashes - (i + 1)) * sizeof *from->hashes);
2085 }
2086 from->n_hashes--;
2087
2088 /* Shift load away from 'from' to 'to'. */
2089 from->tx_bytes -= delta;
2090 to->tx_bytes += delta;
2091
2092 /* Arrange for flows to be revalidated. */
2093 ofproto_revalidate(port->bridge->ofproto, hash->iface_tag);
2094 hash->iface_idx = to->iface->port_ifidx;
2095 hash->iface_tag = tag_create_random();
2096 }
2097
2098 static void
2099 bond_rebalance_port(struct port *port)
2100 {
2101 struct slave_balance bals[DP_MAX_PORTS];
2102 size_t n_bals;
2103 struct bond_entry *hashes[BOND_MASK + 1];
2104 struct slave_balance *b, *from, *to;
2105 struct bond_entry *e;
2106 size_t i;
2107
2108 /* Sets up 'bals' to describe each of the port's interfaces, sorted in
2109 * descending order of tx_bytes, so that bals[0] represents the most
2110 * heavily loaded slave and bals[n_bals - 1] represents the least heavily
2111 * loaded slave.
2112 *
2113 * The code is a bit tricky: to avoid dynamically allocating a 'hashes'
2114 * array for each slave_balance structure, we sort our local array of
2115 * hashes in order by slave, so that all of the hashes for a given slave
2116 * become contiguous in memory, and then we point each 'hashes' member of
2117 * a slave_balance structure to the start of a contiguous group. */
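/* Example (illustrative, with just two slaves and four hash entries instead
 * of BOND_MASK + 1): if h1 and h3 are assigned to slave 0 and h0 and h2 to
 * slave 1, and within each slave the first-named entry has carried fewer
 * bytes, the sorted array is [h1, h3, h0, h2]; so, before 'bals' itself is
 * sorted by load below, bals[0].hashes == &hashes[0] and bals[1].hashes ==
 * &hashes[2], each with n_hashes == 2. */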
2118 n_bals = port->n_ifaces;
2119 for (b = bals; b < &bals[n_bals]; b++) {
2120 b->iface = port->ifaces[b - bals];
2121 b->tx_bytes = 0;
2122 b->hashes = NULL;
2123 b->n_hashes = 0;
2124 }
2125 for (i = 0; i <= BOND_MASK; i++) {
2126 hashes[i] = &port->bond_hash[i];
2127 }
2128 qsort(hashes, BOND_MASK + 1, sizeof *hashes, compare_bond_entries);
2129 for (i = 0; i <= BOND_MASK; i++) {
2130 e = hashes[i];
2131 if (e->iface_idx >= 0 && e->iface_idx < port->n_ifaces) {
2132 b = &bals[e->iface_idx];
2133 b->tx_bytes += e->tx_bytes;
2134 if (!b->hashes) {
2135 b->hashes = &hashes[i];
2136 }
2137 b->n_hashes++;
2138 }
2139 }
2140 qsort(bals, n_bals, sizeof *bals, compare_slave_balance);
2141 log_bals(bals, n_bals, port);
2142
2143 /* Discard slaves that aren't enabled (which were sorted to the back of the
2144 * array earlier). */
2145 while (!bals[n_bals - 1].iface->enabled) {
2146 n_bals--;
2147 if (!n_bals) {
2148 return;
2149 }
2150 }
2151
2152 /* Shift load from the most-loaded slaves to the least-loaded slaves. */
2153 to = &bals[n_bals - 1];
2154 for (from = bals; from < to; ) {
2155 uint64_t overload = from->tx_bytes - to->tx_bytes;
2156 if (overload < to->tx_bytes >> 5 || overload < 100000) {
2157 /* The extra load on 'from' (and all less-loaded slaves), compared
2158 * to that of 'to' (the least-loaded slave), is less than ~3%, or
2159 * it is less than ~1Mbps. No point in rebalancing. */
2160 break;
2161 } else if (from->n_hashes == 1) {
2162 /* 'from' only carries a single MAC hash, so we can't shift any
2163 * load away from it, even though we want to. */
2164 from++;
2165 } else {
2166 /* 'from' is carrying significantly more load than 'to', and that
2167 * load is split across at least two different hashes. Pick a hash
2168 * to migrate to 'to' (the least-loaded slave), given that doing so
2169 * must not cause 'to''s load to exceed 'from''s load.
2170 *
2171 * The sort order we use means that we prefer to shift away the
2172 * smallest hashes instead of the biggest ones. There is little
2173 * reason behind this decision; we could use the opposite sort
2174 * order to shift away big hashes ahead of small ones. */
2175 size_t i;
2176
2177 for (i = 0; i < from->n_hashes; i++) {
2178 uint64_t delta = from->hashes[i]->tx_bytes;
2179 if (to->tx_bytes + delta < from->tx_bytes - delta) {
2180 break;
2181 }
2182 }
2183 if (i < from->n_hashes) {
2184 bond_shift_load(from, to, from->hashes[i]);
2185
2186 /* Re-sort 'bals'. Note that this may make 'from' and 'to'
2187 * point to different slave_balance structures. It is only
2188 * valid to do these two operations in a row at all because we
2189 * know that 'from' will not move past 'to' and vice versa. */
2190 resort_bals(from, bals, n_bals);
2191 resort_bals(to, bals, n_bals);
2192 } else {
2193 from++;
2194 }
2195 }
2196 }
2197
2198 /* Implement exponentially weighted moving average. A weight of 1/2 causes
2199 * historical data to decay to <1% in 7 rebalancing runs. */
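/* (Concretely, after 7 halvings a count retains (1/2)^7 = 1/128, that is,
 * less than 1%, of its original weight, so with the 10-second rebalancing
 * interval set in bridge_account_checkpoint_ofhook_cb() an idle hash fades
 * out of the statistics in roughly a minute.) */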
2200 for (e = &port->bond_hash[0]; e <= &port->bond_hash[BOND_MASK]; e++) {
2201 e->tx_bytes /= 2;
2202 }
2203 }
2204
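/* Sends a gratuitous learning packet on 'port' for each MAC address that the
 * bridge has learned on some other port: a small LLC/SNAP frame whose source
 * address is the learned MAC and whose destination is the broadcast address,
 * transmitted on the slave that choose_output_iface() picks for that MAC.
 * This lets the switches on the far side of the bond update their own
 * learning tables, e.g. after the active slave changes. */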
2205 static void
2206 bond_send_learning_packets(struct port *port)
2207 {
2208 struct bridge *br = port->bridge;
2209 struct mac_entry *e;
2210 struct ofpbuf packet;
2211 int error, n_packets, n_errors;
2212
2213 if (!port->n_ifaces || port->active_iface < 0 || !br->ml) {
2214 return;
2215 }
2216
2217 ofpbuf_init(&packet, 128);
2218 error = n_packets = n_errors = 0;
2219 LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
2220 static const char s[] = "Open vSwitch Bond Failover";
2221 union ofp_action actions[2], *a;
2222 struct eth_header *eth;
2223 struct llc_snap_header *llc_snap;
2224 uint16_t dp_ifidx;
2225 tag_type tags = 0;
2226 flow_t flow;
2227 int retval;
2228
2229 if (e->port == port->port_idx
2230 || !choose_output_iface(port, e->mac, &dp_ifidx, &tags)) {
2231 continue;
2232 }
2233
2234 /* Compose packet to send. */
2235 ofpbuf_clear(&packet);
2236 eth = ofpbuf_put_zeros(&packet, ETH_HEADER_LEN);
2237 llc_snap = ofpbuf_put_zeros(&packet, LLC_SNAP_HEADER_LEN);
2238 ofpbuf_put(&packet, s, sizeof s); /* Includes null byte. */
2239 ofpbuf_put(&packet, e->mac, ETH_ADDR_LEN);
2240
2241 memcpy(eth->eth_dst, eth_addr_broadcast, ETH_ADDR_LEN);
2242 memcpy(eth->eth_src, e->mac, ETH_ADDR_LEN);
2243 eth->eth_type = htons(packet.size - ETH_HEADER_LEN);
2244
2245 llc_snap->llc.llc_dsap = LLC_DSAP_SNAP;
2246 llc_snap->llc.llc_ssap = LLC_SSAP_SNAP;
2247 llc_snap->llc.llc_cntl = LLC_CNTL_SNAP;
2248 memcpy(llc_snap->snap.snap_org, "\x00\x23\x20", 3);
2249 llc_snap->snap.snap_type = htons(0xf177); /* Random number. */
2250
2251 /* Compose actions. */
2252 memset(actions, 0, sizeof actions);
2253 a = actions;
2254 if (e->vlan) {
2255 a->vlan_vid.type = htons(OFPAT_SET_VLAN_VID);
2256 a->vlan_vid.len = htons(sizeof *a);
2257 a->vlan_vid.vlan_vid = htons(e->vlan);
2258 a++;
2259 }
2260 a->output.type = htons(OFPAT_OUTPUT);
2261 a->output.len = htons(sizeof *a);
2262 a->output.port = htons(odp_port_to_ofp_port(dp_ifidx));
2263 a++;
2264
2265 /* Send packet. */
2266 n_packets++;
2267 flow_extract(&packet, ODPP_NONE, &flow);
2268 retval = ofproto_send_packet(br->ofproto, &flow, actions, a - actions,
2269 &packet);
2270 if (retval) {
2271 error = retval;
2272 n_errors++;
2273 }
2274 }
2275 ofpbuf_uninit(&packet);
2276
2277 if (n_errors) {
2278 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2279 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
2280 "packets, last error was: %s",
2281 port->name, n_errors, n_packets, strerror(error));
2282 } else {
2283 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
2284 port->name, n_packets);
2285 }
2286 }
2287 \f
2288 /* Bonding unixctl user interface functions. */
2289
2290 static void
2291 bond_unixctl_list(struct unixctl_conn *conn, const char *args UNUSED)
2292 {
2293 struct ds ds = DS_EMPTY_INITIALIZER;
2294 const struct bridge *br;
2295
2296 ds_put_cstr(&ds, "bridge\tbond\tslaves\n");
2297
2298 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
2299 size_t i;
2300
2301 for (i = 0; i < br->n_ports; i++) {
2302 const struct port *port = br->ports[i];
2303 if (port->n_ifaces > 1) {
2304 size_t j;
2305
2306 ds_put_format(&ds, "%s\t%s\t", br->name, port->name);
2307 for (j = 0; j < port->n_ifaces; j++) {
2308 const struct iface *iface = port->ifaces[j];
2309 if (j) {
2310 ds_put_cstr(&ds, ", ");
2311 }
2312 ds_put_cstr(&ds, iface->name);
2313 }
2314 ds_put_char(&ds, '\n');
2315 }
2316 }
2317 }
2318 unixctl_command_reply(conn, 200, ds_cstr(&ds));
2319 ds_destroy(&ds);
2320 }
2321
2322 static struct port *
2323 bond_find(const char *name)
2324 {
2325 const struct bridge *br;
2326
2327 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
2328 size_t i;
2329
2330 for (i = 0; i < br->n_ports; i++) {
2331 struct port *port = br->ports[i];
2332 if (!strcmp(port->name, name) && port->n_ifaces > 1) {
2333 return port;
2334 }
2335 }
2336 }
2337 return NULL;
2338 }
2339
2340 static void
2341 bond_unixctl_show(struct unixctl_conn *conn, const char *args)
2342 {
2343 struct ds ds = DS_EMPTY_INITIALIZER;
2344 const struct port *port;
2345 size_t j;
2346
2347 port = bond_find(args);
2348 if (!port) {
2349 unixctl_command_reply(conn, 501, "no such bond");
2350 return;
2351 }
2352
2353 ds_put_format(&ds, "updelay: %d ms\n", port->updelay);
2354 ds_put_format(&ds, "downdelay: %d ms\n", port->downdelay);
2355 ds_put_format(&ds, "next rebalance: %lld ms\n",
2356 port->bridge->bond_next_rebalance - time_msec());
2357 for (j = 0; j < port->n_ifaces; j++) {
2358 const struct iface *iface = port->ifaces[j];
2359 struct bond_entry *be;
2360
2361 /* Basic info. */
2362 ds_put_format(&ds, "slave %s: %s\n",
2363 iface->name, iface->enabled ? "enabled" : "disabled");
2364 if (j == port->active_iface) {
2365 ds_put_cstr(&ds, "\tactive slave\n");
2366 }
2367 if (iface->delay_expires != LLONG_MAX) {
2368 ds_put_format(&ds, "\t%s expires in %lld ms\n",
2369 iface->enabled ? "downdelay" : "updelay",
2370 iface->delay_expires - time_msec());
2371 }
2372
2373 /* Hashes. */
2374 for (be = port->bond_hash; be <= &port->bond_hash[BOND_MASK]; be++) {
2375 int hash = be - port->bond_hash;
2376 struct mac_entry *me;
2377
2378 if (be->iface_idx != j) {
2379 continue;
2380 }
2381
2382 ds_put_format(&ds, "\thash %d: %"PRIu64" kB load\n",
2383 hash, be->tx_bytes / 1024);
2384
2385 /* MACs. */
2386 if (!port->bridge->ml) {
2387 break;
2388 }
2389
2390 LIST_FOR_EACH (me, struct mac_entry, lru_node,
2391 &port->bridge->ml->lrus) {
2392 uint16_t dp_ifidx;
2393 tag_type tags = 0;
2394 if (bond_hash(me->mac) == hash
2395 && me->port != port->port_idx
2396 && choose_output_iface(port, me->mac, &dp_ifidx, &tags)
2397 && dp_ifidx == iface->dp_ifidx)
2398 {
2399 ds_put_format(&ds, "\t\t"ETH_ADDR_FMT"\n",
2400 ETH_ADDR_ARGS(me->mac));
2401 }
2402 }
2403 }
2404 }
2405 unixctl_command_reply(conn, 200, ds_cstr(&ds));
2406 ds_destroy(&ds);
2407 }
2408
2409 static void
2410 bond_unixctl_migrate(struct unixctl_conn *conn, const char *args_)
2411 {
2412 char *args = (char *) args_;
2413 char *save_ptr = NULL;
2414 char *bond_s, *hash_s, *slave_s;
2415 uint8_t mac[ETH_ADDR_LEN];
2416 struct port *port;
2417 struct iface *iface;
2418 struct bond_entry *entry;
2419 int hash;
2420
2421 bond_s = strtok_r(args, " ", &save_ptr);
2422 hash_s = strtok_r(NULL, " ", &save_ptr);
2423 slave_s = strtok_r(NULL, " ", &save_ptr);
2424 if (!slave_s) {
2425 unixctl_command_reply(conn, 501,
2426 "usage: bond/migrate BOND HASH SLAVE");
2427 return;
2428 }
2429
2430 port = bond_find(bond_s);
2431 if (!port) {
2432 unixctl_command_reply(conn, 501, "no such bond");
2433 return;
2434 }
2435
2436 if (sscanf(hash_s, "%"SCNx8":%"SCNx8":%"SCNx8":%"SCNx8":%"SCNx8":%"SCNx8,
2437 &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) == 6) {
2438 hash = bond_hash(mac);
2439 } else if (strspn(hash_s, "0123456789") == strlen(hash_s)) {
2440 hash = atoi(hash_s) & BOND_MASK;
2441 } else {
2442 unixctl_command_reply(conn, 501, "bad hash");
2443 return;
2444 }
2445
2446 iface = port_lookup_iface(port, slave_s);
2447 if (!iface) {
2448 unixctl_command_reply(conn, 501, "no such slave");
2449 return;
2450 }
2451
2452 if (!iface->enabled) {
2453 unixctl_command_reply(conn, 501, "cannot migrate to disabled slave");
2454 return;
2455 }
2456
2457 entry = &port->bond_hash[hash];
2458 ofproto_revalidate(port->bridge->ofproto, entry->iface_tag);
2459 entry->iface_idx = iface->port_ifidx;
2460 entry->iface_tag = tag_create_random();
2461 unixctl_command_reply(conn, 200, "migrated");
2462 }
2463
2464 static void
2465 bond_unixctl_set_active_slave(struct unixctl_conn *conn, const char *args_)
2466 {
2467 char *args = (char *) args_;
2468 char *save_ptr = NULL;
2469 char *bond_s, *slave_s;
2470 struct port *port;
2471 struct iface *iface;
2472
2473 bond_s = strtok_r(args, " ", &save_ptr);
2474 slave_s = strtok_r(NULL, " ", &save_ptr);
2475 if (!slave_s) {
2476 unixctl_command_reply(conn, 501,
2477 "usage: bond/set-active-slave BOND SLAVE");
2478 return;
2479 }
2480
2481 port = bond_find(bond_s);
2482 if (!port) {
2483 unixctl_command_reply(conn, 501, "no such bond");
2484 return;
2485 }
2486
2487 iface = port_lookup_iface(port, slave_s);
2488 if (!iface) {
2489 unixctl_command_reply(conn, 501, "no such slave");
2490 return;
2491 }
2492
2493 if (!iface->enabled) {
2494 unixctl_command_reply(conn, 501, "cannot make disabled slave active");
2495 return;
2496 }
2497
2498 if (port->active_iface != iface->port_ifidx) {
2499 ofproto_revalidate(port->bridge->ofproto, port->active_iface_tag);
2500 port->active_iface = iface->port_ifidx;
2501 port->active_iface_tag = tag_create_random();
2502 VLOG_INFO("port %s: active interface is now %s",
2503 port->name, iface->name);
2504 bond_send_learning_packets(port);
2505 unixctl_command_reply(conn, 200, "done");
2506 } else {
2507 unixctl_command_reply(conn, 200, "no change");
2508 }
2509 }
2510
2511 static void
2512 enable_slave(struct unixctl_conn *conn, const char *args_, bool enable)
2513 {
2514 char *args = (char *) args_;
2515 char *save_ptr = NULL;
2516 char *bond_s, *slave_s;
2517 struct port *port;
2518 struct iface *iface;
2519
2520 bond_s = strtok_r(args, " ", &save_ptr);
2521 slave_s = strtok_r(NULL, " ", &save_ptr);
2522 if (!slave_s) {
2523 unixctl_command_reply(conn, 501,
2524 "usage: bond/enable/disable-slave BOND SLAVE");
2525 return;
2526 }
2527
2528 port = bond_find(bond_s);
2529 if (!port) {
2530 unixctl_command_reply(conn, 501, "no such bond");
2531 return;
2532 }
2533
2534 iface = port_lookup_iface(port, slave_s);
2535 if (!iface) {
2536 unixctl_command_reply(conn, 501, "no such slave");
2537 return;
2538 }
2539
2540 bond_enable_slave(iface, enable);
2541 unixctl_command_reply(conn, 200, enable ? "enabled" : "disabled");
2542 }
2543
2544 static void
2545 bond_unixctl_enable_slave(struct unixctl_conn *conn, const char *args)
2546 {
2547 enable_slave(conn, args, true);
2548 }
2549
2550 static void
2551 bond_unixctl_disable_slave(struct unixctl_conn *conn, const char *args)
2552 {
2553 enable_slave(conn, args, false);
2554 }
2555
2556 static void
2557 bond_init(void)
2558 {
2559 unixctl_command_register("bond/list", bond_unixctl_list);
2560 unixctl_command_register("bond/show", bond_unixctl_show);
2561 unixctl_command_register("bond/migrate", bond_unixctl_migrate);
2562 unixctl_command_register("bond/set-active-slave",
2563 bond_unixctl_set_active_slave);
2564 unixctl_command_register("bond/enable-slave", bond_unixctl_enable_slave);
2565 unixctl_command_register("bond/disable-slave", bond_unixctl_disable_slave);
2566 }
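/* The commands registered above are reachable through the vswitchd unixctl
 * socket, e.g. with ovs-appctl (the bond and slave names below are purely
 * illustrative):
 *
 *     ovs-appctl bond/list
 *     ovs-appctl bond/show bond0
 *     ovs-appctl bond/migrate bond0 14 eth1     (HASH may also be a MAC)
 *     ovs-appctl bond/set-active-slave bond0 eth0
 *     ovs-appctl bond/disable-slave bond0 eth1
 */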
2567 \f
2568 /* Port functions. */
2569
2570 static void
2571 port_create(struct bridge *br, const char *name)
2572 {
2573 struct port *port;
2574
2575 port = xcalloc(1, sizeof *port);
2576 port->bridge = br;
2577 port->port_idx = br->n_ports;
2578 port->vlan = -1;
2579 port->trunks = NULL;
2580 port->name = xstrdup(name);
2581 port->active_iface = -1;
2582 port->stp_state = STP_DISABLED;
2583 port->stp_state_tag = 0;
2584
2585 if (br->n_ports >= br->allocated_ports) {
2586 br->ports = x2nrealloc(br->ports, &br->allocated_ports,
2587 sizeof *br->ports);
2588 }
2589 br->ports[br->n_ports++] = port;
2590
2591 VLOG_INFO("created port %s on bridge %s", port->name, br->name);
2592 bridge_flush(br);
2593 }
2594
2595 static void
2596 port_reconfigure(struct port *port)
2597 {
2598 bool bonded = cfg_has_section("bonding.%s", port->name);
2599 struct svec old_ifaces, new_ifaces;
2600 unsigned long *trunks;
2601 int vlan;
2602 size_t i;
2603
2604 /* Collect old and new interfaces. */
2605 svec_init(&old_ifaces);
2606 svec_init(&new_ifaces);
2607 for (i = 0; i < port->n_ifaces; i++) {
2608 svec_add(&old_ifaces, port->ifaces[i]->name);
2609 }
2610 svec_sort(&old_ifaces);
2611 if (bonded) {
2612 cfg_get_all_keys(&new_ifaces, "bonding.%s.slave", port->name);
2613 if (!new_ifaces.n) {
2614 VLOG_ERR("port %s: no interfaces specified for bonded port",
2615 port->name);
2616 } else if (new_ifaces.n == 1) {
2617 VLOG_WARN("port %s: only 1 interface specified for bonded port",
2618 port->name);
2619 }
2620
2621 port->updelay = cfg_get_int(0, "bonding.%s.updelay", port->name);
2622 if (port->updelay < 0) {
2623 port->updelay = 0;
2624 }
2625 port->downdelay = cfg_get_int(0, "bonding.%s.downdelay", port->name);
2626 if (port->downdelay < 0) {
2627 port->downdelay = 0;
2628 }
2629 } else {
2630 svec_init(&new_ifaces);
2631 svec_add(&new_ifaces, port->name);
2632 }
2633
2634 /* Get rid of deleted interfaces and add new interfaces. */
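/* (The loop below advances 'i' only when it keeps an interface, because
 * iface_destroy() removes an interface by moving the last element of
 * port->ifaces into its slot, and that slot must then be examined again.) */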
2635 for (i = 0; i < port->n_ifaces; ) {
2636 struct iface *iface = port->ifaces[i];
2637 if (!svec_contains(&new_ifaces, iface->name)) {
2638 iface_destroy(iface);
2639 } else {
2640 i++;
2641 }
2642 }
2643 for (i = 0; i < new_ifaces.n; i++) {
2644 const char *name = new_ifaces.names[i];
2645 if (!svec_contains(&old_ifaces, name)) {
2646 iface_create(port, name);
2647 }
2648 }
2649
2650 /* Get VLAN tag. */
2651 vlan = -1;
2652 if (cfg_has("vlan.%s.tag", port->name)) {
2653 if (!bonded) {
2654 vlan = cfg_get_vlan(0, "vlan.%s.tag", port->name);
2655 if (vlan >= 0 && vlan <= 4095) {
2656 VLOG_DBG("port %s: assigning VLAN tag %d", port->name, vlan);
2657 }
2658 } else {
2659 /* It's possible that bonded, VLAN-tagged ports make sense. Maybe
2660 * they even work as-is. But they have not been tested. */
2661 VLOG_WARN("port %s: VLAN tags not supported on bonded ports",
2662 port->name);
2663 }
2664 }
2665 if (port->vlan != vlan) {
2666 port->vlan = vlan;
2667 bridge_flush(port->bridge);
2668 }
2669
2670 /* Get trunked VLANs. */
2671 trunks = NULL;
2672 if (vlan < 0) {
2673 size_t n_trunks, n_errors;
2674 size_t i;
2675
2676 trunks = bitmap_allocate(4096);
2677 n_trunks = cfg_count("vlan.%s.trunks", port->name);
2678 n_errors = 0;
2679 for (i = 0; i < n_trunks; i++) {
2680 int trunk = cfg_get_vlan(i, "vlan.%s.trunks", port->name);
2681 if (trunk >= 0) {
2682 bitmap_set1(trunks, trunk);
2683 } else {
2684 n_errors++;
2685 }
2686 }
2687 if (n_errors) {
2688 VLOG_ERR("port %s: ignoring %zu invalid trunk VLAN(s)",
2689 port->name, n_errors);
2690 }
2691 if (n_errors == n_trunks) {
2692 if (n_errors) {
2693 VLOG_ERR("port %s: no valid trunks, trunking all VLANs",
2694 port->name);
2695 }
2696 bitmap_set_multiple(trunks, 0, 4096, 1);
2697 }
2698 } else {
2699 if (cfg_has("vlan.%s.trunks", port->name)) {
2700 VLOG_ERR("ignoring vlan.%s.trunks in favor of vlan.%s.tag",
2701 port->name, port->name);
2702 }
2703 }
2704 if (trunks == NULL
2705 ? port->trunks != NULL
2706 : port->trunks == NULL || !bitmap_equal(trunks, port->trunks, 4096)) {
2707 bridge_flush(port->bridge);
2708 }
2709 bitmap_free(port->trunks);
2710 port->trunks = trunks;
2711
2712 svec_destroy(&old_ifaces);
2713 svec_destroy(&new_ifaces);
2714 }
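/* For reference, the configuration keys consulted above take roughly this
 * form in the key=value configuration file (port and interface names are
 * illustrative; the key names themselves come from the cfg_*() calls in
 * port_reconfigure()):
 *
 *     bonding.bond0.slave=eth0
 *     bonding.bond0.slave=eth1
 *     bonding.bond0.updelay=200
 *     bonding.bond0.downdelay=100
 *     vlan.eth2.tag=9
 *     vlan.eth3.trunks=10
 *     vlan.eth3.trunks=11
 */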
2715
2716 static void
2717 port_destroy(struct port *port)
2718 {
2719 if (port) {
2720 struct bridge *br = port->bridge;
2721 struct port *del;
2722 size_t i;
2723
2724 proc_net_compat_update_vlan(port->name, NULL, 0);
2725
2726 for (i = 0; i < MAX_MIRRORS; i++) {
2727 struct mirror *m = br->mirrors[i];
2728 if (m && m->out_port == port) {
2729 mirror_destroy(m);
2730 }
2731 }
2732
2733 while (port->n_ifaces > 0) {
2734 iface_destroy(port->ifaces[port->n_ifaces - 1]);
2735 }
2736
2737 del = br->ports[port->port_idx] = br->ports[--br->n_ports];
2738 del->port_idx = port->port_idx;
2739
2740 free(port->ifaces);
2741 bitmap_free(port->trunks);
2742 free(port->name);
2743 free(port);
2744 bridge_flush(br);
2745 }
2746 }
2747
2748 static struct port *
2749 port_from_dp_ifidx(const struct bridge *br, uint16_t dp_ifidx)
2750 {
2751 struct iface *iface = iface_from_dp_ifidx(br, dp_ifidx);
2752 return iface ? iface->port : NULL;
2753 }
2754
2755 static struct port *
2756 port_lookup(const struct bridge *br, const char *name)
2757 {
2758 size_t i;
2759
2760 for (i = 0; i < br->n_ports; i++) {
2761 struct port *port = br->ports[i];
2762 if (!strcmp(port->name, name)) {
2763 return port;
2764 }
2765 }
2766 return NULL;
2767 }
2768
2769 static struct iface *
2770 port_lookup_iface(const struct port *port, const char *name)
2771 {
2772 size_t j;
2773
2774 for (j = 0; j < port->n_ifaces; j++) {
2775 struct iface *iface = port->ifaces[j];
2776 if (!strcmp(iface->name, name)) {
2777 return iface;
2778 }
2779 }
2780 return NULL;
2781 }
2782
2783 static void
2784 port_update_bonding(struct port *port)
2785 {
2786 if (port->n_ifaces < 2) {
2787 /* Not a bonded port. */
2788 if (port->bond_hash) {
2789 free(port->bond_hash);
2790 port->bond_hash = NULL;
2791 proc_net_compat_update_bond(port->name, NULL);
2792 }
2793 } else {
2794 if (!port->bond_hash) {
2795 size_t i;
2796
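/* Lazily create the bond hash table: BOND_MASK + 1 (that is, 256) bond_entry
 * buckets, each of which pins one source-MAC hash value to a slave and
 * accumulates the byte count that bond_rebalance_port() uses to shift load
 * between slaves. */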
2797 port->bond_hash = xcalloc(BOND_MASK + 1, sizeof *port->bond_hash);
2798 for (i = 0; i <= BOND_MASK; i++) {
2799 struct bond_entry *e = &port->bond_hash[i];
2800 e->iface_idx = -1;
2801 e->tx_bytes = 0;
2802 }
2803 port->no_ifaces_tag = tag_create_random();
2804 bond_choose_active_iface(port);
2805 }
2806 port_update_bond_compat(port);
2807 }
2808 }
2809
2810 static void
2811 port_update_bond_compat(struct port *port)
2812 {
2813 struct compat_bond bond;
2814 size_t i;
2815
2816 if (port->n_ifaces < 2) {
2817 return;
2818 }
2819
2820 bond.up = false;
2821 bond.updelay = port->updelay;
2822 bond.downdelay = port->downdelay;
2823 bond.n_slaves = port->n_ifaces;
2824 bond.slaves = xmalloc(port->n_ifaces * sizeof *bond.slaves);
2825 for (i = 0; i < port->n_ifaces; i++) {
2826 struct iface *iface = port->ifaces[i];
2827 struct compat_bond_slave *slave = &bond.slaves[i];
2828 slave->name = iface->name;
2829 slave->up = ((iface->enabled && iface->delay_expires == LLONG_MAX) ||
2830 (!iface->enabled && iface->delay_expires != LLONG_MAX));
2831 if (slave->up) {
2832 bond.up = true;
2833 }
2834 memcpy(slave->mac, iface->mac, ETH_ADDR_LEN);
2835 }
2836 proc_net_compat_update_bond(port->name, &bond);
2837 free(bond.slaves);
2838 }
2839
2840 static void
2841 port_update_vlan_compat(struct port *port)
2842 {
2843 struct bridge *br = port->bridge;
2844 char *vlandev_name = NULL;
2845
2846 if (port->vlan > 0) {
2847 /* Figure out the name that the VLAN device should actually have, if it
2848 * existed. This takes some work because the VLAN device would not
2849 * have port->name in its name; rather, it would have the trunk port's
2850 * name, and 'port' would be attached to a bridge that also had the
2851 * VLAN device one of its ports. So we need to find a trunk port that
2852 * includes port->vlan.
2853 *
2854 * There might be more than one candidate. This doesn't happen on
2855 * XenServer, so if it happens we just pick the first choice in
2856 * alphabetical order instead of creating multiple VLAN devices. */
2857 size_t i;
2858 for (i = 0; i < br->n_ports; i++) {
2859 struct port *p = br->ports[i];
2860 if (port_trunks_vlan(p, port->vlan)
2861 && p->n_ifaces
2862 && (!vlandev_name || strcmp(p->name, vlandev_name) <= 0))
2863 {
2864 const uint8_t *ea = p->ifaces[0]->mac;
2865 if (!eth_addr_is_multicast(ea) &&
2866 !eth_addr_is_reserved(ea) &&
2867 !eth_addr_is_zero(ea)) {
2868 vlandev_name = p->name;
2869 }
2870 }
2871 }
2872 }
2873 proc_net_compat_update_vlan(port->name, vlandev_name, port->vlan);
2874 }
2875 \f
2876 /* Interface functions. */
2877
2878 static void
2879 iface_create(struct port *port, const char *name)
2880 {
2881 struct iface *iface;
2882
2883 iface = xcalloc(1, sizeof *iface);
2884 iface->port = port;
2885 iface->port_ifidx = port->n_ifaces;
2886 iface->name = xstrdup(name);
2887 iface->dp_ifidx = -1;
2888 iface->tag = tag_create_random();
2889 iface->delay_expires = LLONG_MAX;
2890
2891 netdev_nodev_get_etheraddr(name, iface->mac);
2892 netdev_nodev_get_carrier(name, &iface->enabled);
2893
2894 if (port->n_ifaces >= port->allocated_ifaces) {
2895 port->ifaces = x2nrealloc(port->ifaces, &port->allocated_ifaces,
2896 sizeof *port->ifaces);
2897 }
2898 port->ifaces[port->n_ifaces++] = iface;
2899 if (port->n_ifaces > 1) {
2900 port->bridge->has_bonded_ports = true;
2901 }
2902
2903 VLOG_DBG("attached network device %s to port %s", iface->name, port->name);
2904
2905 port_update_bonding(port);
2906 bridge_flush(port->bridge);
2907 }
2908
2909 static void
2910 iface_destroy(struct iface *iface)
2911 {
2912 if (iface) {
2913 struct port *port = iface->port;
2914 struct bridge *br = port->bridge;
2915 bool del_active = port->active_iface == iface->port_ifidx;
2916 struct iface *del;
2917
2918 if (iface->dp_ifidx >= 0) {
2919 port_array_set(&br->ifaces, iface->dp_ifidx, NULL);
2920 }
2921
2922 del = port->ifaces[iface->port_ifidx] = port->ifaces[--port->n_ifaces];
2923 del->port_ifidx = iface->port_ifidx;
2924
2925 free(iface->name);
2926 free(iface);
2927
2928 if (del_active) {
2929 ofproto_revalidate(port->bridge->ofproto, port->active_iface_tag);
2930 bond_choose_active_iface(port);
2931 bond_send_learning_packets(port);
2932 }
2933
2934 port_update_bonding(port);
2935 bridge_flush(port->bridge);
2936 }
2937 }
2938
2939 static struct iface *
2940 iface_lookup(const struct bridge *br, const char *name)
2941 {
2942 size_t i, j;
2943
2944 for (i = 0; i < br->n_ports; i++) {
2945 struct port *port = br->ports[i];
2946 for (j = 0; j < port->n_ifaces; j++) {
2947 struct iface *iface = port->ifaces[j];
2948 if (!strcmp(iface->name, name)) {
2949 return iface;
2950 }
2951 }
2952 }
2953 return NULL;
2954 }
2955
2956 static struct iface *
2957 iface_from_dp_ifidx(const struct bridge *br, uint16_t dp_ifidx)
2958 {
2959 return port_array_get(&br->ifaces, dp_ifidx);
2960 }
2961 \f
2962 /* Port mirroring. */
2963
2964 static void
2965 mirror_reconfigure(struct bridge *br)
2966 {
2967 struct svec old_mirrors, new_mirrors;
2968 size_t i;
2969
2970 /* Collect old and new mirrors. */
2971 svec_init(&old_mirrors);
2972 svec_init(&new_mirrors);
2973 cfg_get_subsections(&new_mirrors, "mirror.%s", br->name);
2974 for (i = 0; i < MAX_MIRRORS; i++) {
2975 if (br->mirrors[i]) {
2976 svec_add(&old_mirrors, br->mirrors[i]->name);
2977 }
2978 }
2979
2980 /* Get rid of deleted mirrors and add new mirrors. */
2981 svec_sort(&old_mirrors);
2982 assert(svec_is_unique(&old_mirrors));
2983 svec_sort(&new_mirrors);
2984 assert(svec_is_unique(&new_mirrors));
2985 for (i = 0; i < MAX_MIRRORS; i++) {
2986 struct mirror *m = br->mirrors[i];
2987 if (m && !svec_contains(&new_mirrors, m->name)) {
2988 mirror_destroy(m);
2989 }
2990 }
2991 for (i = 0; i < new_mirrors.n; i++) {
2992 const char *name = new_mirrors.names[i];
2993 if (!svec_contains(&old_mirrors, name)) {
2994 mirror_create(br, name);
2995 }
2996 }
2997 svec_destroy(&old_mirrors);
2998 svec_destroy(&new_mirrors);
2999
3000 /* Reconfigure all mirrors. */
3001 for (i = 0; i < MAX_MIRRORS; i++) {
3002 if (br->mirrors[i]) {
3003 mirror_reconfigure_one(br->mirrors[i]);
3004 }
3005 }
3006
3007 /* Update port reserved status. */
3008 for (i = 0; i < br->n_ports; i++) {
3009 br->ports[i]->is_mirror_output_port = false;
3010 }
3011 for (i = 0; i < MAX_MIRRORS; i++) {
3012 struct mirror *m = br->mirrors[i];
3013 if (m && m->out_port) {
3014 m->out_port->is_mirror_output_port = true;
3015 }
3016 }
3017 }
3018
3019 static void
3020 mirror_create(struct bridge *br, const char *name)
3021 {
3022 struct mirror *m;
3023 size_t i;
3024
3025 for (i = 0; ; i++) {
3026 if (i >= MAX_MIRRORS) {
3027 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
3028 "cannot create %s", br->name, MAX_MIRRORS, name);
3029 return;
3030 }
3031 if (!br->mirrors[i]) {
3032 break;
3033 }
3034 }
3035
3036 VLOG_INFO("created port mirror %s on bridge %s", name, br->name);
3037 bridge_flush(br);
3038
3039 br->mirrors[i] = m = xcalloc(1, sizeof *m);
3040 m->bridge = br;
3041 m->idx = i;
3042 m->name = xstrdup(name);
3043 svec_init(&m->src_ports);
3044 svec_init(&m->dst_ports);
3045 m->vlans = NULL;
3046 m->n_vlans = 0;
3047 m->out_vlan = -1;
3048 m->out_port = NULL;
3049 }
3050
3051 static void
3052 mirror_destroy(struct mirror *m)
3053 {
3054 if (m) {
3055 struct bridge *br = m->bridge;
3056 size_t i;
3057
3058 for (i = 0; i < br->n_ports; i++) {
3059 br->ports[i]->src_mirrors &= ~(MIRROR_MASK_C(1) << m->idx);
3060 br->ports[i]->dst_mirrors &= ~(MIRROR_MASK_C(1) << m->idx);
3061 }
3062
3063 svec_destroy(&m->src_ports);
3064 svec_destroy(&m->dst_ports);
3065 free(m->vlans);
3066
3067 m->bridge->mirrors[m->idx] = NULL;
3068 free(m);
3069
3070 bridge_flush(br);
3071 }
3072 }
3073
3074 static void
3075 prune_ports(struct mirror *m, struct svec *ports)
3076 {
3077 struct svec tmp;
3078 size_t i;
3079
3080 svec_sort_unique(ports);
3081
3082 svec_init(&tmp);
3083 for (i = 0; i < ports->n; i++) {
3084 const char *name = ports->names[i];
3085 if (port_lookup(m->bridge, name)) {
3086 svec_add(&tmp, name);
3087 } else {
3088 VLOG_WARN("mirror.%s.%s: cannot match on nonexistent port %s",
3089 m->bridge->name, m->name, name);
3090 }
3091 }
3092 svec_swap(ports, &tmp);
3093 svec_destroy(&tmp);
3094 }
3095
3096 static size_t
3097 prune_vlans(struct mirror *m, struct svec *vlan_strings, int **vlans)
3098 {
3099 size_t n_vlans, i;
3100
3101 /* This isn't perfect: it won't combine "0" and "00", and the textual sort
3102 * order won't give us numeric sort order. But that's good enough for what
3103 * we need right now. */
3104 svec_sort_unique(vlan_strings);
3105
3106 *vlans = xmalloc(sizeof **vlans * vlan_strings->n);
3107 n_vlans = 0;
3108 for (i = 0; i < vlan_strings->n; i++) {
3109 const char *name = vlan_strings->names[i];
3110 int vlan;
3111 if (!str_to_int(name, 10, &vlan) || vlan < 0 || vlan > 4095) {
3112 VLOG_WARN("mirror.%s.%s.select.vlan: ignoring invalid VLAN %s",
3113 m->bridge->name, m->name, name);
3114 } else {
3115 (*vlans)[n_vlans++] = vlan;
3116 }
3117 }
3118 return n_vlans;
3119 }
3120
3121 static bool
3122 vlan_is_mirrored(const struct mirror *m, int vlan)
3123 {
3124 size_t i;
3125
3126 for (i = 0; i < m->n_vlans; i++) {
3127 if (m->vlans[i] == vlan) {
3128 return true;
3129 }
3130 }
3131 return false;
3132 }
3133
3134 static bool
3135 port_trunks_any_mirrored_vlan(const struct mirror *m, const struct port *p)
3136 {
3137 size_t i;
3138
3139 for (i = 0; i < m->n_vlans; i++) {
3140 if (port_trunks_vlan(p, m->vlans[i])) {
3141 return true;
3142 }
3143 }
3144 return false;
3145 }
3146
3147 static void
3148 mirror_reconfigure_one(struct mirror *m)
3149 {
3150 char *pfx = xasprintf("mirror.%s.%s", m->bridge->name, m->name);
3151 struct svec src_ports, dst_ports, ports;
3152 struct svec vlan_strings;
3153 mirror_mask_t mirror_bit;
3154 const char *out_port_name;
3155 struct port *out_port;
3156 int out_vlan;
3157 size_t n_vlans;
3158 int *vlans;
3159 size_t i;
3160 bool mirror_all_ports;
3161
3162 /* Get output port. */
3163 out_port_name = cfg_get_key(0, "mirror.%s.%s.output.port",
3164 m->bridge->name, m->name);
3165 if (out_port_name) {
3166 out_port = port_lookup(m->bridge, out_port_name);
3167 if (!out_port) {
3168 VLOG_ERR("%s.output.port: bridge %s does not have a port "
3169 "named %s", pfx, m->bridge->name, out_port_name);
3170 mirror_destroy(m);
3171 free(pfx);
3172 return;
3173 }
3174 out_vlan = -1;
3175
3176 if (cfg_has("%s.output.vlan", pfx)) {
3177 VLOG_ERR("%s.output.port and %s.output.vlan both specified; "
3178 "ignoring %s.output.vlan", pfx, pfx, pfx);
3179 }
3180 } else if (cfg_has("%s.output.vlan", pfx)) {
3181 out_port = NULL;
3182 out_vlan = cfg_get_vlan(0, "%s.output.vlan", pfx);
3183 } else {
3184 VLOG_ERR("%s: neither %s.output.port nor %s.output.vlan specified, "
3185 "but exactly one is required; disabling port mirror %s",
3186 pfx, pfx, pfx, pfx);
3187 mirror_destroy(m);
3188 free(pfx);
3189 return;
3190 }
3191
3192 /* Get all the ports, and drop duplicates and ports that don't exist. */
3193 svec_init(&src_ports);
3194 svec_init(&dst_ports);
3195 svec_init(&ports);
3196 cfg_get_all_keys(&src_ports, "%s.select.src-port", pfx);
3197 cfg_get_all_keys(&dst_ports, "%s.select.dst-port", pfx);
3198 cfg_get_all_keys(&ports, "%s.select.port", pfx);
3199 svec_append(&src_ports, &ports);
3200 svec_append(&dst_ports, &ports);
3201 svec_destroy(&ports);
3202 prune_ports(m, &src_ports);
3203 prune_ports(m, &dst_ports);
3204
3205 /* Get all the vlans, and drop duplicate and invalid vlans. */
3206 svec_init(&vlan_strings);
3207 cfg_get_all_keys(&vlan_strings, "%s.select.vlan", pfx);
3208 n_vlans = prune_vlans(m, &vlan_strings, &vlans);
3209 svec_destroy(&vlan_strings);
3210
3211 /* Update mirror data. */
3212 if (!svec_equal(&m->src_ports, &src_ports)
3213 || !svec_equal(&m->dst_ports, &dst_ports)
3214 || m->n_vlans != n_vlans
3215 || memcmp(m->vlans, vlans, sizeof *vlans * n_vlans)
3216 || m->out_port != out_port
3217 || m->out_vlan != out_vlan) {
3218 bridge_flush(m->bridge);
3219 }
3220 svec_swap(&m->src_ports, &src_ports);
3221 svec_swap(&m->dst_ports, &dst_ports);
3222 free(m->vlans);
3223 m->vlans = vlans;
3224 m->n_vlans = n_vlans;
3225 m->out_port = out_port;
3226 m->out_vlan = out_vlan;
3227
3228 /* If no selection criteria have been given, mirror for all ports. */
3229 mirror_all_ports = (!m->src_ports.n) && (!m->dst_ports.n) && (!m->n_vlans);
3230
3231 /* Update ports. */
3232 mirror_bit = MIRROR_MASK_C(1) << m->idx;
3233 for (i = 0; i < m->bridge->n_ports; i++) {
3234 struct port *port = m->bridge->ports[i];
3235
3236 if (mirror_all_ports
3237 || svec_contains(&m->src_ports, port->name)
3238 || (m->n_vlans
3239 && (port->vlan < 0
3240 ? port_trunks_any_mirrored_vlan(m, port)
3241 : vlan_is_mirrored(m, port->vlan)))) {
3242 port->src_mirrors |= mirror_bit;
3243 } else {
3244 port->src_mirrors &= ~mirror_bit;
3245 }
3246
3247 if (mirror_all_ports || svec_contains(&m->dst_ports, port->name)) {
3248 port->dst_mirrors |= mirror_bit;
3249 } else {
3250 port->dst_mirrors &= ~mirror_bit;
3251 }
3252 }
3253
3254 /* Clean up. */
3255 svec_destroy(&src_ports);
3256 svec_destroy(&dst_ports);
3257 free(pfx);
3258 }
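/* For reference, a mirror is defined with the keys read above, roughly like
 * this in the configuration file (bridge, mirror, and port names are
 * illustrative):
 *
 *     mirror.br0.m0.select.src-port=eth0
 *     mirror.br0.m0.select.vlan=10
 *     mirror.br0.m0.output.port=eth3
 *
 * Exactly one of output.port and output.vlan must be supplied; if no
 * select.* keys are given at all, traffic on every port is mirrored. */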
3259 \f
3260 /* Spanning tree protocol. */
3261
3262 static void brstp_update_port_state(struct port *);
3263
3264 static void
3265 brstp_send_bpdu(struct ofpbuf *pkt, int port_no, void *br_)
3266 {
3267 struct bridge *br = br_;
3268 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3269 struct iface *iface = iface_from_dp_ifidx(br, port_no);
3270 if (!iface) {
3271 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
3272 br->name, port_no);
3273 } else if (eth_addr_is_zero(iface->mac)) {
3274 VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d with unknown MAC",
3275 br->name, port_no);
3276 } else {
3277 union ofp_action action;
3278 struct eth_header *eth = pkt->l2;
3279 flow_t flow;
3280
3281 memcpy(eth->eth_src, iface->mac, ETH_ADDR_LEN);
3282
3283 memset(&action, 0, sizeof action);
3284 action.type = htons(OFPAT_OUTPUT);
3285 action.output.len = htons(sizeof action);
3286 action.output.port = htons(port_no);
3287
3288 flow_extract(pkt, ODPP_NONE, &flow);
3289 ofproto_send_packet(br->ofproto, &flow, &action, 1, pkt);
3290 }
3291 ofpbuf_delete(pkt);
3292 }
3293
3294 static void
3295 brstp_reconfigure(struct bridge *br)
3296 {
3297 size_t i;
3298
3299 if (!cfg_get_bool(0, "stp.%s.enabled", br->name)) {
3300 if (br->stp) {
3301 stp_destroy(br->stp);
3302 br->stp = NULL;
3303
3304 bridge_flush(br);
3305 }
3306 } else {
3307 uint64_t bridge_address, bridge_id;
3308 int bridge_priority;
3309
3310 bridge_address = cfg_get_mac(0, "stp.%s.address", br->name);
3311 if (!bridge_address) {
3312 if (br->stp) {
3313 bridge_address = (stp_get_bridge_id(br->stp)
3314 & ((UINT64_C(1) << 48) - 1));
3315 } else {
3316 uint8_t mac[ETH_ADDR_LEN];
3317 eth_addr_random(mac);
3318 bridge_address = eth_addr_to_uint64(mac);
3319 }
3320 }
3321
3322 if (cfg_is_valid(CFG_INT | CFG_REQUIRED, "stp.%s.priority",
3323 br->name)) {
3324 bridge_priority = cfg_get_int(0, "stp.%s.priority", br->name);
3325 } else {
3326 bridge_priority = STP_DEFAULT_BRIDGE_PRIORITY;
3327 }
3328
3329 bridge_id = bridge_address | ((uint64_t) bridge_priority << 48);
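/* (The resulting 802.1D bridge ID carries the 16-bit priority in its upper
 * bits and the 48-bit bridge address in its lower bits.) */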
3330 if (!br->stp) {
3331 br->stp = stp_create(br->name, bridge_id, brstp_send_bpdu, br);
3332 br->stp_last_tick = time_msec();
3333 bridge_flush(br);
3334 } else {
3335 if (bridge_id != stp_get_bridge_id(br->stp)) {
3336 stp_set_bridge_id(br->stp, bridge_id);
3337 bridge_flush(br);
3338 }
3339 }
3340
3341 for (i = 0; i < br->n_ports; i++) {
3342 struct port *p = br->ports[i];
3343 int dp_ifidx;
3344 struct stp_port *sp;
3345 int path_cost, priority;
3346 bool enable;
3347
3348 if (!p->n_ifaces) {
3349 continue;
3350 }
3351 dp_ifidx = p->ifaces[0]->dp_ifidx;
3352 if (dp_ifidx < 0 || dp_ifidx >= STP_MAX_PORTS) {
3353 continue;
3354 }
3355
3356 sp = stp_get_port(br->stp, dp_ifidx);
3357 enable = (!cfg_is_valid(CFG_BOOL | CFG_REQUIRED,
3358 "stp.%s.port.%s.enabled",
3359 br->name, p->name)
3360 || cfg_get_bool(0, "stp.%s.port.%s.enabled",
3361 br->name, p->name));
3362 if (p->is_mirror_output_port) {
3363 enable = false;
3364 }
3365 if (enable != (stp_port_get_state(sp) != STP_DISABLED)) {
3366 bridge_flush(br); /* Might not be necessary. */
3367 if (enable) {
3368 stp_port_enable(sp);
3369 } else {
3370 stp_port_disable(sp);
3371 }
3372 }
3373
3374 path_cost = cfg_get_int(0, "stp.%s.port.%s.path-cost",
3375 br->name, p->name);
3376 stp_port_set_path_cost(sp, path_cost ? path_cost : 19 /* XXX */);
3377
3378 priority = (cfg_is_valid(CFG_INT | CFG_REQUIRED,
3379 "stp.%s.port.%s.priority",
3380 br->name, p->name)
3381 ? cfg_get_int(0, "stp.%s.port.%s.priority",
3382 br->name, p->name)
3383 : STP_DEFAULT_PORT_PRIORITY);
3384 stp_port_set_priority(sp, priority);
3385 }
3386
3387 brstp_adjust_timers(br);
3388 }
3389 for (i = 0; i < br->n_ports; i++) {
3390 brstp_update_port_state(br->ports[i]);
3391 }
3392 }
3393
3394 static void
3395 brstp_update_port_state(struct port *p)
3396 {
3397 struct bridge *br = p->bridge;
3398 enum stp_state state;
3399
3400 /* Figure out new state. */
3401 state = STP_DISABLED;
3402 if (br->stp && p->n_ifaces > 0) {
3403 int dp_ifidx = p->ifaces[0]->dp_ifidx;
3404 if (dp_ifidx >= 0 && dp_ifidx < STP_MAX_PORTS) {
3405 state = stp_port_get_state(stp_get_port(br->stp, dp_ifidx));
3406 }
3407 }
3408
3409 /* Update state. */
3410 if (p->stp_state != state) {
3411 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
3412 VLOG_INFO_RL(&rl, "port %s: STP state changed from %s to %s",
3413 p->name, stp_state_name(p->stp_state),
3414 stp_state_name(state));
3415 if (p->stp_state == STP_DISABLED) {
3416 bridge_flush(br);
3417 } else {
3418 ofproto_revalidate(p->bridge->ofproto, p->stp_state_tag);
3419 }
3420 p->stp_state = state;
3421 p->stp_state_tag = (p->stp_state == STP_DISABLED ? 0
3422 : tag_create_random());
3423 }
3424 }
3425
3426 static void
3427 brstp_adjust_timers(struct bridge *br)
3428 {
3429 int hello_time = cfg_get_int(0, "stp.%s.hello-time", br->name);
3430 int max_age = cfg_get_int(0, "stp.%s.max-age", br->name);
3431 int forward_delay = cfg_get_int(0, "stp.%s.forward-delay", br->name);
3432
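/* Fall back to the standard IEEE 802.1D defaults (2 s hello time, 20 s max
 * age, 15 s forward delay) for any timer that is unset or zero. */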
3433 stp_set_hello_time(br->stp, hello_time ? hello_time : 2000);
3434 stp_set_max_age(br->stp, max_age ? max_age : 20000);
3435 stp_set_forward_delay(br->stp, forward_delay ? forward_delay : 15000);
3436 }
3437
3438 static void
3439 brstp_run(struct bridge *br)
3440 {
3441 if (br->stp) {
3442 long long int now = time_msec();
3443 long long int elapsed = now - br->stp_last_tick;
3444 struct stp_port *sp;
3445
3446 if (elapsed > 0) {
3447 stp_tick(br->stp, MIN(INT_MAX, elapsed));
3448 br->stp_last_tick = now;
3449 }
3450 while (stp_get_changed_port(br->stp, &sp)) {
3451 struct port *p = port_from_dp_ifidx(br, stp_port_no(sp));
3452 if (p) {
3453 brstp_update_port_state(p);
3454 }
3455 }
3456 }
3457 }
3458
3459 static void
3460 brstp_wait(struct bridge *br)
3461 {
3462 if (br->stp) {
3463 poll_timer_wait(1000);
3464 }
3465 }