1 /* Copyright (c) 2008, 2009, 2010 Nicira Networks
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <config.h>
17 #include "bridge.h"
18 #include <assert.h>
19 #include <errno.h>
20 #include <arpa/inet.h>
21 #include <ctype.h>
22 #include <inttypes.h>
23 #include <net/if.h>
24 #include <openflow/openflow.h>
25 #include <signal.h>
26 #include <stdlib.h>
27 #include <strings.h>
28 #include <sys/stat.h>
29 #include <sys/socket.h>
30 #include <sys/types.h>
31 #include <unistd.h>
32 #include "bitmap.h"
33 #include "coverage.h"
34 #include "dirs.h"
35 #include "dpif.h"
36 #include "dynamic-string.h"
37 #include "flow.h"
38 #include "hash.h"
39 #include "jsonrpc.h"
40 #include "list.h"
41 #include "mac-learning.h"
42 #include "netdev.h"
43 #include "odp-util.h"
44 #include "ofp-print.h"
45 #include "ofpbuf.h"
46 #include "ofproto/netflow.h"
47 #include "ofproto/ofproto.h"
48 #include "packets.h"
49 #include "poll-loop.h"
50 #include "port-array.h"
51 #include "proc-net-compat.h"
52 #include "process.h"
53 #include "sha1.h"
54 #include "shash.h"
55 #include "socket-util.h"
56 #include "stream-ssl.h"
57 #include "svec.h"
58 #include "timeval.h"
59 #include "util.h"
60 #include "unixctl.h"
61 #include "vconn.h"
62 #include "vswitchd/vswitch-idl.h"
63 #include "xenserver.h"
64 #include "xtoxll.h"
65 #include "sflow_api.h"
66
67 #define THIS_MODULE VLM_bridge
68 #include "vlog.h"
69
70 struct dst {
71 uint16_t vlan;
72 uint16_t dp_ifidx;
73 };
74
75 struct iface {
76 /* These members are always valid. */
77 struct port *port; /* Containing port. */
78 size_t port_ifidx; /* Index within containing port. */
79 char *name; /* Host network device name. */
80 tag_type tag; /* Tag associated with this interface. */
81 long long delay_expires; /* Time after which 'enabled' may change. */
82
83 /* These members are valid only after bridge_reconfigure() causes them to
84      * be initialized. */
85 int dp_ifidx; /* Index within kernel datapath. */
86 struct netdev *netdev; /* Network device. */
87 bool enabled; /* May be chosen for flows? */
88
89 /* This member is only valid *during* bridge_reconfigure(). */
90 const struct ovsrec_interface *cfg;
91 };
92
93 #define BOND_MASK 0xff
94 struct bond_entry {
95 int iface_idx; /* Index of assigned iface, or -1 if none. */
96 uint64_t tx_bytes; /* Count of bytes recently transmitted. */
97 tag_type iface_tag; /* Tag associated with iface_idx. */
98 };
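/* Illustrative note (not in the original source): a bonded port keeps an
 * array of BOND_MASK + 1 (256) of these entries.  bond_hash() below maps a
 * packet's source MAC to one entry, whose 'iface_idx' names the slave used
 * for output, so traffic from a given source MAC sticks to one slave until
 * the periodic rebalancing moves its hash bucket elsewhere. */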
99
100 #define MAX_MIRRORS 32
101 typedef uint32_t mirror_mask_t;
102 #define MIRROR_MASK_C(X) UINT32_C(X)
103 BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
104 struct mirror {
105 struct bridge *bridge;
106 size_t idx;
107 char *name;
108
109 /* Selection criteria. */
110 struct shash src_ports; /* Name is port name; data is always NULL. */
111 struct shash dst_ports; /* Name is port name; data is always NULL. */
112 int *vlans;
113 size_t n_vlans;
114
115 /* Output. */
116 struct port *out_port;
117 int out_vlan;
118 };
119
120 #define FLOOD_PORT ((struct port *) 1) /* The 'flood' output port. */
121 struct port {
122 struct bridge *bridge;
123 size_t port_idx;
124 int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
125 unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1. */
126 char *name;
127
128 /* An ordinary bridge port has 1 interface.
129 * A bridge port for bonding has at least 2 interfaces. */
130 struct iface **ifaces;
131 size_t n_ifaces, allocated_ifaces;
132
133 /* Bonding info. */
134 struct bond_entry *bond_hash; /* An array of (BOND_MASK + 1) elements. */
135 int active_iface; /* Ifidx on which bcasts accepted, or -1. */
136 tag_type active_iface_tag; /* Tag for bcast flows. */
137 tag_type no_ifaces_tag; /* Tag for flows when all ifaces disabled. */
138 int updelay, downdelay; /* Delay before iface goes up/down, in ms. */
139 bool bond_compat_is_stale; /* Need to call port_update_bond_compat()? */
140 bool bond_fake_iface; /* Fake a bond interface for legacy compat? */
141 long bond_next_fake_iface_update; /* Next update to fake bond stats. */
142 int bond_rebalance_interval; /* Interval between rebalances, in ms. */
143 long long int bond_next_rebalance; /* Next rebalancing time. */
144
145 /* Port mirroring info. */
146 mirror_mask_t src_mirrors; /* Mirrors triggered when packet received. */
147 mirror_mask_t dst_mirrors; /* Mirrors triggered when packet sent. */
148 bool is_mirror_output_port; /* Does port mirroring send frames here? */
149
150 /* This member is only valid *during* bridge_reconfigure(). */
151 const struct ovsrec_port *cfg;
152 };
153
154 #define DP_MAX_PORTS 255
155 struct bridge {
156 struct list node; /* Node in global list of bridges. */
157 char *name; /* User-specified arbitrary name. */
158 struct mac_learning *ml; /* MAC learning table. */
159 bool sent_config_request; /* Successfully sent config request? */
160 uint8_t default_ea[ETH_ADDR_LEN]; /* Default MAC. */
161
162 /* OpenFlow switch processing. */
163 struct ofproto *ofproto; /* OpenFlow switch. */
164
165 /* Description strings. */
166 char *mfr_desc; /* Manufacturer. */
167 char *hw_desc; /* Hardware. */
168 char *sw_desc; /* Software version. */
169 char *serial_desc; /* Serial number. */
170 char *dp_desc; /* Datapath description. */
171
172 /* Kernel datapath information. */
173 struct dpif *dpif; /* Datapath. */
174 struct port_array ifaces; /* Indexed by kernel datapath port number. */
175
176 /* Bridge ports. */
177 struct port **ports;
178 size_t n_ports, allocated_ports;
179
180 /* Bonding. */
181 bool has_bonded_ports;
182
183 /* Flow tracking. */
184 bool flush;
185
186 /* Flow statistics gathering. */
187 time_t next_stats_request;
188
189 /* Port mirroring. */
190 struct mirror *mirrors[MAX_MIRRORS];
191
192 /* This member is only valid *during* bridge_reconfigure(). */
193 const struct ovsrec_bridge *cfg;
194 };
195
196 /* List of all bridges. */
197 static struct list all_bridges = LIST_INITIALIZER(&all_bridges);
198
199 /* Maximum number of datapaths. */
200 enum { DP_MAX = 256 };
201
202 static struct bridge *bridge_create(const struct ovsrec_bridge *br_cfg);
203 static void bridge_destroy(struct bridge *);
204 static struct bridge *bridge_lookup(const char *name);
205 static unixctl_cb_func bridge_unixctl_dump_flows;
206 static int bridge_run_one(struct bridge *);
207 static size_t bridge_get_controllers(const struct ovsrec_open_vswitch *ovs_cfg,
208 const struct bridge *br,
209 struct ovsrec_controller ***controllersp);
210 static void bridge_reconfigure_one(const struct ovsrec_open_vswitch *,
211 struct bridge *);
212 static void bridge_reconfigure_remotes(const struct ovsrec_open_vswitch *,
213 struct bridge *,
214 const struct sockaddr_in *managers,
215 size_t n_managers);
216 static void bridge_get_all_ifaces(const struct bridge *, struct shash *ifaces);
217 static void bridge_fetch_dp_ifaces(struct bridge *);
218 static void bridge_flush(struct bridge *);
219 static void bridge_pick_local_hw_addr(struct bridge *,
220 uint8_t ea[ETH_ADDR_LEN],
221 struct iface **hw_addr_iface);
222 static uint64_t bridge_pick_datapath_id(struct bridge *,
223 const uint8_t bridge_ea[ETH_ADDR_LEN],
224 struct iface *hw_addr_iface);
225 static struct iface *bridge_get_local_iface(struct bridge *);
226 static uint64_t dpid_from_hash(const void *, size_t nbytes);
227
228 static unixctl_cb_func bridge_unixctl_fdb_show;
229
230 static void bond_init(void);
231 static void bond_run(struct bridge *);
232 static void bond_wait(struct bridge *);
233 static void bond_rebalance_port(struct port *);
234 static void bond_send_learning_packets(struct port *);
235 static void bond_enable_slave(struct iface *iface, bool enable);
236
237 static struct port *port_create(struct bridge *, const char *name);
238 static void port_reconfigure(struct port *, const struct ovsrec_port *);
239 static void port_destroy(struct port *);
240 static struct port *port_lookup(const struct bridge *, const char *name);
241 static struct iface *port_lookup_iface(const struct port *, const char *name);
242 static struct port *port_from_dp_ifidx(const struct bridge *,
243 uint16_t dp_ifidx);
244 static void port_update_bond_compat(struct port *);
245 static void port_update_vlan_compat(struct port *);
246 static void port_update_bonding(struct port *);
247
248 static struct mirror *mirror_create(struct bridge *, const char *name);
249 static void mirror_destroy(struct mirror *);
250 static void mirror_reconfigure(struct bridge *);
251 static void mirror_reconfigure_one(struct mirror *, struct ovsrec_mirror *);
252 static bool vlan_is_mirrored(const struct mirror *, int vlan);
253
254 static struct iface *iface_create(struct port *port,
255 const struct ovsrec_interface *if_cfg);
256 static void iface_destroy(struct iface *);
257 static struct iface *iface_lookup(const struct bridge *, const char *name);
258 static struct iface *iface_from_dp_ifidx(const struct bridge *,
259 uint16_t dp_ifidx);
260 static bool iface_is_internal(const struct bridge *, const char *name);
261 static void iface_set_mac(struct iface *);
262
263 /* Hooks into ofproto processing. */
264 static struct ofhooks bridge_ofhooks;
265 \f
266 /* Public functions. */
267
268 /* Adds the name of each interface used by a bridge to 'svec', including
269  * internal ports but excluding the bridge's local port. */
270 void
271 bridge_get_ifaces(struct svec *svec)
272 {
273 struct bridge *br, *next;
274 size_t i, j;
275
276 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
277 for (i = 0; i < br->n_ports; i++) {
278 struct port *port = br->ports[i];
279
280 for (j = 0; j < port->n_ifaces; j++) {
281 struct iface *iface = port->ifaces[j];
282 if (iface->dp_ifidx < 0) {
283 VLOG_ERR("%s interface not in datapath %s, ignoring",
284 iface->name, dpif_name(br->dpif));
285 } else {
286 if (iface->dp_ifidx != ODPP_LOCAL) {
287 svec_add(svec, iface->name);
288 }
289 }
290 }
291 }
292 }
293 }
294
295 void
296 bridge_init(const struct ovsrec_open_vswitch *cfg)
297 {
298 struct svec bridge_names;
299 struct svec dpif_names, dpif_types;
300 size_t i;
301
302 unixctl_command_register("fdb/show", bridge_unixctl_fdb_show, NULL);
303
304 svec_init(&bridge_names);
305 for (i = 0; i < cfg->n_bridges; i++) {
306 svec_add(&bridge_names, cfg->bridges[i]->name);
307 }
308 svec_sort(&bridge_names);
309
310 svec_init(&dpif_names);
311 svec_init(&dpif_types);
312 dp_enumerate_types(&dpif_types);
313 for (i = 0; i < dpif_types.n; i++) {
314 struct dpif *dpif;
315 int retval;
316 size_t j;
317
318 dp_enumerate_names(dpif_types.names[i], &dpif_names);
319
320 for (j = 0; j < dpif_names.n; j++) {
321 retval = dpif_open(dpif_names.names[j], dpif_types.names[i], &dpif);
322 if (!retval) {
323 struct svec all_names;
324 size_t k;
325
326 svec_init(&all_names);
327 dpif_get_all_names(dpif, &all_names);
328 for (k = 0; k < all_names.n; k++) {
329 if (svec_contains(&bridge_names, all_names.names[k])) {
330 goto found;
331 }
332 }
333 dpif_delete(dpif);
334 found:
335 svec_destroy(&all_names);
336 dpif_close(dpif);
337 }
338 }
339 }
340 svec_destroy(&bridge_names);
341 svec_destroy(&dpif_names);
342 svec_destroy(&dpif_types);
343
344 unixctl_command_register("bridge/dump-flows", bridge_unixctl_dump_flows,
345 NULL);
346
347 bond_init();
348 bridge_reconfigure(cfg);
349 }
350
351 #ifdef HAVE_OPENSSL
352 static void
353 bridge_configure_ssl(const struct ovsrec_ssl *ssl)
354 {
355 /* XXX SSL should be configurable on a per-bridge basis. */
356 if (ssl) {
357 stream_ssl_set_private_key_file(ssl->private_key);
358 stream_ssl_set_certificate_file(ssl->certificate);
359 stream_ssl_set_ca_cert_file(ssl->ca_cert, ssl->bootstrap_ca_cert);
360 }
361 }
362 #endif
363
364 /* Attempts to set up the network device for 'iface' through the netdev
365  * library, creating it first if 'create' is true.  Returns 0 or a positive errno value. */
366 static int
367 set_up_iface(const struct ovsrec_interface *iface_cfg, struct iface *iface,
368 bool create)
369 {
370 struct shash_node *node;
371 struct shash options;
372 int error = 0;
373 size_t i;
374
375 shash_init(&options);
376 for (i = 0; i < iface_cfg->n_options; i++) {
377 shash_add(&options, iface_cfg->key_options[i],
378 xstrdup(iface_cfg->value_options[i]));
379 }
380
381 if (create) {
382 struct netdev_options netdev_options;
383
384 memset(&netdev_options, 0, sizeof netdev_options);
385 netdev_options.name = iface_cfg->name;
386 if (!strcmp(iface_cfg->type, "internal")) {
387 /* An "internal" config type maps to a netdev "system" type. */
388 netdev_options.type = "system";
389 } else {
390 netdev_options.type = iface_cfg->type;
391 }
392 netdev_options.args = &options;
393 netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
394 netdev_options.may_create = true;
395 if (iface_is_internal(iface->port->bridge, iface_cfg->name)) {
396 netdev_options.may_open = true;
397 }
398
399 error = netdev_open(&netdev_options, &iface->netdev);
400
401 if (iface->netdev) {
402 netdev_get_carrier(iface->netdev, &iface->enabled);
403 }
404 } else if (iface->netdev) {
405 const char *netdev_type = netdev_get_type(iface->netdev);
406 const char *iface_type = iface_cfg->type && strlen(iface_cfg->type)
407 ? iface_cfg->type : NULL;
408
409 /* An "internal" config type maps to a netdev "system" type. */
410 if (iface_type && !strcmp(iface_type, "internal")) {
411 iface_type = "system";
412 }
413
414 if (!iface_type || !strcmp(netdev_type, iface_type)) {
415 error = netdev_reconfigure(iface->netdev, &options);
416 } else {
417             VLOG_WARN("%s: attempting to change device type from %s to %s",
418 iface_cfg->name, netdev_type, iface_type);
419 error = EINVAL;
420 }
421 }
422
423 SHASH_FOR_EACH (node, &options) {
424 free(node->data);
425 }
426 shash_destroy(&options);
427
428 return error;
429 }
430
431 static int
432 reconfigure_iface(const struct ovsrec_interface *iface_cfg, struct iface *iface)
433 {
434 return set_up_iface(iface_cfg, iface, false);
435 }
436
437 static bool
438 check_iface_netdev(struct bridge *br OVS_UNUSED, struct iface *iface,
439 void *aux OVS_UNUSED)
440 {
441 if (!iface->netdev) {
442 int error = set_up_iface(iface->cfg, iface, true);
443 if (error) {
444 VLOG_WARN("could not open netdev on %s, dropping: %s", iface->name,
445 strerror(error));
446 return false;
447 }
448 }
449
450 return true;
451 }
452
453 static bool
454 check_iface_dp_ifidx(struct bridge *br, struct iface *iface,
455 void *aux OVS_UNUSED)
456 {
457 if (iface->dp_ifidx >= 0) {
458 VLOG_DBG("%s has interface %s on port %d",
459 dpif_name(br->dpif),
460 iface->name, iface->dp_ifidx);
461 return true;
462 } else {
463 VLOG_ERR("%s interface not in %s, dropping",
464 iface->name, dpif_name(br->dpif));
465 return false;
466 }
467 }
468
469 static bool
470 set_iface_properties(struct bridge *br OVS_UNUSED, struct iface *iface,
471 void *aux OVS_UNUSED)
472 {
473 /* Set policing attributes. */
474 netdev_set_policing(iface->netdev,
475 iface->cfg->ingress_policing_rate,
476 iface->cfg->ingress_policing_burst);
477
478 /* Set MAC address of internal interfaces other than the local
479 * interface. */
480 if (iface->dp_ifidx != ODPP_LOCAL
481 && iface_is_internal(br, iface->name)) {
482 iface_set_mac(iface);
483 }
484
485 return true;
486 }
487
488 /* Calls 'cb' for each interface in 'br', passing along the 'aux' argument.
489 * Deletes from 'br' all the interfaces for which 'cb' returns false, and then
490 * deletes from 'br' any ports that no longer have any interfaces. */
491 static void
492 iterate_and_prune_ifaces(struct bridge *br,
493 bool (*cb)(struct bridge *, struct iface *,
494 void *aux),
495 void *aux)
496 {
497 size_t i, j;
498
499 for (i = 0; i < br->n_ports; ) {
500 struct port *port = br->ports[i];
501 for (j = 0; j < port->n_ifaces; ) {
502 struct iface *iface = port->ifaces[j];
503 if (cb(br, iface, aux)) {
504 j++;
505 } else {
506 iface_destroy(iface);
507 }
508 }
509
510 if (port->n_ifaces) {
511 i++;
512 } else {
513 VLOG_ERR("%s port has no interfaces, dropping", port->name);
514 port_destroy(port);
515 }
516 }
517 }
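
/* Illustrative usage (mirrors the calls made from bridge_reconfigure() below,
 * not new functionality): dropping interfaces whose netdev could not be
 * opened, or that never appeared in the kernel datapath, looks like
 *
 *     iterate_and_prune_ifaces(br, check_iface_netdev, NULL);
 *     iterate_and_prune_ifaces(br, check_iface_dp_ifidx, NULL);
 *
 * since a callback returning false causes its interface (and, eventually, any
 * port left with no interfaces) to be destroyed. */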
518
519 /* Looks at the list of managers in 'ovs_cfg' and extracts their remote IP
520 * addresses and ports into '*managersp' and '*n_managersp'. The caller is
521 * responsible for freeing '*managersp' (with free()).
522 *
523  * You may be asking yourself "why does ovs-vswitchd care?", since
524  * ovsdb-server is responsible for connecting to the managers and ovs-vswitchd
525  * is not (and should not be) directly involved in that.  But
526 * ovs-vswitchd needs to make sure that ovsdb-server can reach the managers, so
527 * it has to tell in-band control where the managers are to enable that.
528 */
529 static void
530 collect_managers(const struct ovsrec_open_vswitch *ovs_cfg,
531 struct sockaddr_in **managersp, size_t *n_managersp)
532 {
533 struct sockaddr_in *managers = NULL;
534 size_t n_managers = 0;
535
536 if (ovs_cfg->n_managers > 0) {
537 size_t i;
538
539 managers = xmalloc(ovs_cfg->n_managers * sizeof *managers);
540 for (i = 0; i < ovs_cfg->n_managers; i++) {
541 const char *name = ovs_cfg->managers[i];
542             struct sockaddr_in *sin = &managers[n_managers];
543
544 if ((!strncmp(name, "tcp:", 4)
545 && inet_parse_active(name + 4, JSONRPC_TCP_PORT, sin)) ||
546 (!strncmp(name, "ssl:", 4)
547 && inet_parse_active(name + 4, JSONRPC_SSL_PORT, sin))) {
548 n_managers++;
549 }
550 }
551 }
552
553 *managersp = managers;
554 *n_managersp = n_managers;
555 }
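
/* Example (illustrative, not from the original source): a manager list such
 * as {"tcp:192.0.2.10:6632", "ssl:mgr.example.com"} yields two entries in
 * '*managersp'; targets that do not start with "tcp:" or "ssl:", or that fail
 * to parse, are silently skipped, and JSONRPC_TCP_PORT or JSONRPC_SSL_PORT
 * fills in a missing port number. */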
556
557 void
558 bridge_reconfigure(const struct ovsrec_open_vswitch *ovs_cfg)
559 {
560 struct ovsdb_idl_txn *txn;
561 struct shash old_br, new_br;
562 struct shash_node *node;
563 struct bridge *br, *next;
564 struct sockaddr_in *managers;
565 size_t n_managers;
566 size_t i;
567 int sflow_bridge_number;
568
569 COVERAGE_INC(bridge_reconfigure);
570
571 txn = ovsdb_idl_txn_create(ovs_cfg->header_.table->idl);
572
573 collect_managers(ovs_cfg, &managers, &n_managers);
574
575 /* Collect old and new bridges. */
576 shash_init(&old_br);
577 shash_init(&new_br);
578 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
579 shash_add(&old_br, br->name, br);
580 }
581 for (i = 0; i < ovs_cfg->n_bridges; i++) {
582 const struct ovsrec_bridge *br_cfg = ovs_cfg->bridges[i];
583 if (!shash_add_once(&new_br, br_cfg->name, br_cfg)) {
584 VLOG_WARN("more than one bridge named %s", br_cfg->name);
585 }
586 }
587
588 /* Get rid of deleted bridges and add new bridges. */
589 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
590 struct ovsrec_bridge *br_cfg = shash_find_data(&new_br, br->name);
591 if (br_cfg) {
592 br->cfg = br_cfg;
593 } else {
594 bridge_destroy(br);
595 }
596 }
597 SHASH_FOR_EACH (node, &new_br) {
598 const char *br_name = node->name;
599 const struct ovsrec_bridge *br_cfg = node->data;
600 br = shash_find_data(&old_br, br_name);
601 if (br) {
602 /* If the bridge datapath type has changed, we need to tear it
603 * down and recreate. */
604 if (strcmp(br->cfg->datapath_type, br_cfg->datapath_type)) {
605 bridge_destroy(br);
606 bridge_create(br_cfg);
607 }
608 } else {
609 bridge_create(br_cfg);
610 }
611 }
612 shash_destroy(&old_br);
613 shash_destroy(&new_br);
614
615 #ifdef HAVE_OPENSSL
616 /* Configure SSL. */
617 bridge_configure_ssl(ovs_cfg->ssl);
618 #endif
619
620 /* Reconfigure all bridges. */
621 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
622 bridge_reconfigure_one(ovs_cfg, br);
623 }
624
625 /* Add and delete ports on all datapaths.
626 *
627 * The kernel will reject any attempt to add a given port to a datapath if
628 * that port already belongs to a different datapath, so we must do all
629 * port deletions before any port additions. */
630 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
631 struct odp_port *dpif_ports;
632 size_t n_dpif_ports;
633 struct shash want_ifaces;
634
635 dpif_port_list(br->dpif, &dpif_ports, &n_dpif_ports);
636 bridge_get_all_ifaces(br, &want_ifaces);
637 for (i = 0; i < n_dpif_ports; i++) {
638 const struct odp_port *p = &dpif_ports[i];
639 if (!shash_find(&want_ifaces, p->devname)
640 && strcmp(p->devname, br->name)) {
641 int retval = dpif_port_del(br->dpif, p->port);
642 if (retval) {
643 VLOG_ERR("failed to remove %s interface from %s: %s",
644 p->devname, dpif_name(br->dpif),
645 strerror(retval));
646 }
647 }
648 }
649 shash_destroy(&want_ifaces);
650 free(dpif_ports);
651 }
652 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
653 struct odp_port *dpif_ports;
654 size_t n_dpif_ports;
655 struct shash cur_ifaces, want_ifaces;
656 struct shash_node *node;
657
658 /* Get the set of interfaces currently in this datapath. */
659 dpif_port_list(br->dpif, &dpif_ports, &n_dpif_ports);
660 shash_init(&cur_ifaces);
661 for (i = 0; i < n_dpif_ports; i++) {
662 const char *name = dpif_ports[i].devname;
663 if (!shash_find(&cur_ifaces, name)) {
664 shash_add(&cur_ifaces, name, NULL);
665 }
666 }
667 free(dpif_ports);
668
669 /* Get the set of interfaces we want on this datapath. */
670 bridge_get_all_ifaces(br, &want_ifaces);
671
672 SHASH_FOR_EACH (node, &want_ifaces) {
673 const char *if_name = node->name;
674 struct iface *iface = node->data;
675
676 if (shash_find(&cur_ifaces, if_name)) {
677 /* Already exists, just reconfigure it. */
678 if (iface) {
679 reconfigure_iface(iface->cfg, iface);
680 }
681 } else {
682 /* Need to add to datapath. */
683 bool internal;
684 int error;
685
686 /* Add to datapath. */
687 internal = iface_is_internal(br, if_name);
688 error = dpif_port_add(br->dpif, if_name,
689 internal ? ODP_PORT_INTERNAL : 0, NULL);
690 if (error == EFBIG) {
691 VLOG_ERR("ran out of valid port numbers on %s",
692 dpif_name(br->dpif));
693 break;
694 } else if (error) {
695 VLOG_ERR("failed to add %s interface to %s: %s",
696 if_name, dpif_name(br->dpif), strerror(error));
697 }
698 }
699 }
700 shash_destroy(&cur_ifaces);
701 shash_destroy(&want_ifaces);
702 }
703 sflow_bridge_number = 0;
704 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
705 uint8_t ea[8];
706 uint64_t dpid;
707 struct iface *local_iface;
708 struct iface *hw_addr_iface;
709 char *dpid_string;
710
711 bridge_fetch_dp_ifaces(br);
712
713 iterate_and_prune_ifaces(br, check_iface_netdev, NULL);
714 iterate_and_prune_ifaces(br, check_iface_dp_ifidx, NULL);
715
716 /* Pick local port hardware address, datapath ID. */
717 bridge_pick_local_hw_addr(br, ea, &hw_addr_iface);
718 local_iface = bridge_get_local_iface(br);
719 if (local_iface) {
720 int error = netdev_set_etheraddr(local_iface->netdev, ea);
721 if (error) {
722 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
723 VLOG_ERR_RL(&rl, "bridge %s: failed to set bridge "
724 "Ethernet address: %s",
725 br->name, strerror(error));
726 }
727 }
728
729 dpid = bridge_pick_datapath_id(br, ea, hw_addr_iface);
730 ofproto_set_datapath_id(br->ofproto, dpid);
731
732 dpid_string = xasprintf("%012"PRIx64, dpid);
733 ovsrec_bridge_set_datapath_id(br->cfg, dpid_string);
734 free(dpid_string);
735
736 /* Set NetFlow configuration on this bridge. */
737 if (br->cfg->netflow) {
738 struct ovsrec_netflow *nf_cfg = br->cfg->netflow;
739 struct netflow_options opts;
740
741 memset(&opts, 0, sizeof opts);
742
743 dpif_get_netflow_ids(br->dpif, &opts.engine_type, &opts.engine_id);
744 if (nf_cfg->engine_type) {
745 opts.engine_type = *nf_cfg->engine_type;
746 }
747 if (nf_cfg->engine_id) {
748 opts.engine_id = *nf_cfg->engine_id;
749 }
750
751 opts.active_timeout = nf_cfg->active_timeout;
752 if (!opts.active_timeout) {
753 opts.active_timeout = -1;
754 } else if (opts.active_timeout < 0) {
755 VLOG_WARN("bridge %s: active timeout interval set to negative "
756 "value, using default instead (%d seconds)", br->name,
757 NF_ACTIVE_TIMEOUT_DEFAULT);
758 opts.active_timeout = -1;
759 }
760
761 opts.add_id_to_iface = nf_cfg->add_id_to_interface;
762 if (opts.add_id_to_iface) {
763 if (opts.engine_id > 0x7f) {
764 VLOG_WARN("bridge %s: netflow port mangling may conflict "
765 "with another vswitch, choose an engine id less "
766 "than 128", br->name);
767 }
768 if (br->n_ports > 508) {
769 VLOG_WARN("bridge %s: netflow port mangling will conflict "
770 "with another port when more than 508 ports are "
771 "used", br->name);
772 }
773 }
774
775 opts.collectors.n = nf_cfg->n_targets;
776 opts.collectors.names = nf_cfg->targets;
777 if (ofproto_set_netflow(br->ofproto, &opts)) {
778 VLOG_ERR("bridge %s: problem setting netflow collectors",
779 br->name);
780 }
781 } else {
782 ofproto_set_netflow(br->ofproto, NULL);
783 }
784
785 /* Set sFlow configuration on this bridge. */
786 if (br->cfg->sflow) {
787 const struct ovsrec_sflow *sflow_cfg = br->cfg->sflow;
788 struct ovsrec_controller **controllers;
789 struct ofproto_sflow_options oso;
790 size_t n_controllers;
791 size_t i;
792
793 memset(&oso, 0, sizeof oso);
794
795 oso.targets.n = sflow_cfg->n_targets;
796 oso.targets.names = sflow_cfg->targets;
797
798 oso.sampling_rate = SFL_DEFAULT_SAMPLING_RATE;
799 if (sflow_cfg->sampling) {
800 oso.sampling_rate = *sflow_cfg->sampling;
801 }
802
803 oso.polling_interval = SFL_DEFAULT_POLLING_INTERVAL;
804 if (sflow_cfg->polling) {
805 oso.polling_interval = *sflow_cfg->polling;
806 }
807
808 oso.header_len = SFL_DEFAULT_HEADER_SIZE;
809 if (sflow_cfg->header) {
810 oso.header_len = *sflow_cfg->header;
811 }
812
813 oso.sub_id = sflow_bridge_number++;
814 oso.agent_device = sflow_cfg->agent;
815
816 oso.control_ip = NULL;
817 n_controllers = bridge_get_controllers(ovs_cfg, br, &controllers);
818 for (i = 0; i < n_controllers; i++) {
819 if (controllers[i]->local_ip) {
820 oso.control_ip = controllers[i]->local_ip;
821 break;
822 }
823 }
824 ofproto_set_sflow(br->ofproto, &oso);
825
826 svec_destroy(&oso.targets);
827 } else {
828 ofproto_set_sflow(br->ofproto, NULL);
829 }
830
831 /* Update the controller and related settings. It would be more
832 * straightforward to call this from bridge_reconfigure_one(), but we
833 * can't do it there for two reasons. First, and most importantly, at
834 * that point we don't know the dp_ifidx of any interfaces that have
835 * been added to the bridge (because we haven't actually added them to
836 * the datapath). Second, at that point we haven't set the datapath ID
837 * yet; when a controller is configured, resetting the datapath ID will
838 * immediately disconnect from the controller, so it's better to set
839 * the datapath ID before the controller. */
840 bridge_reconfigure_remotes(ovs_cfg, br, managers, n_managers);
841 }
842 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
843 for (i = 0; i < br->n_ports; i++) {
844 struct port *port = br->ports[i];
845
846 port_update_vlan_compat(port);
847 port_update_bonding(port);
848 }
849 }
850 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
851 iterate_and_prune_ifaces(br, set_iface_properties, NULL);
852 }
853
854 ovsrec_open_vswitch_set_cur_cfg(ovs_cfg, ovs_cfg->next_cfg);
855
856 ovsdb_idl_txn_commit(txn);
857 ovsdb_idl_txn_destroy(txn); /* XXX */
858
859 free(managers);
860 }
861
862 static const char *
863 get_ovsrec_key_value(const char *key, char **keys, char **values, size_t n)
864 {
865 size_t i;
866
867 for (i = 0; i < n; i++) {
868 if (!strcmp(keys[i], key)) {
869 return values[i];
870 }
871 }
872 return NULL;
873 }
874
875 static const char *
876 bridge_get_other_config(const struct ovsrec_bridge *br_cfg, const char *key)
877 {
878 return get_ovsrec_key_value(key,
879 br_cfg->key_other_config,
880 br_cfg->value_other_config,
881 br_cfg->n_other_config);
882 }
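
/* Example (illustrative): the callers below read well-known keys from the
 * Bridge table's other_config column, e.g.
 *
 *     const char *hwaddr = bridge_get_other_config(br->cfg, "hwaddr");
 *     const char *dpid   = bridge_get_other_config(br->cfg, "datapath-id");
 *
 * each of which returns NULL when the key is absent. */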
883
884 static void
885 bridge_pick_local_hw_addr(struct bridge *br, uint8_t ea[ETH_ADDR_LEN],
886 struct iface **hw_addr_iface)
887 {
888 const char *hwaddr;
889 size_t i, j;
890 int error;
891
892 *hw_addr_iface = NULL;
893
894 /* Did the user request a particular MAC? */
895 hwaddr = bridge_get_other_config(br->cfg, "hwaddr");
896 if (hwaddr && eth_addr_from_string(hwaddr, ea)) {
897 if (eth_addr_is_multicast(ea)) {
898 VLOG_ERR("bridge %s: cannot set MAC address to multicast "
899 "address "ETH_ADDR_FMT, br->name, ETH_ADDR_ARGS(ea));
900 } else if (eth_addr_is_zero(ea)) {
901 VLOG_ERR("bridge %s: cannot set MAC address to zero", br->name);
902 } else {
903 return;
904 }
905 }
906
907 /* Otherwise choose the minimum non-local MAC address among all of the
908 * interfaces. */
909     memset(ea, 0xff, ETH_ADDR_LEN);
910 for (i = 0; i < br->n_ports; i++) {
911 struct port *port = br->ports[i];
912 uint8_t iface_ea[ETH_ADDR_LEN];
913 struct iface *iface;
914
915 /* Mirror output ports don't participate. */
916 if (port->is_mirror_output_port) {
917 continue;
918 }
919
920 /* Choose the MAC address to represent the port. */
921 if (port->cfg->mac && eth_addr_from_string(port->cfg->mac, iface_ea)) {
922 /* Find the interface with this Ethernet address (if any) so that
923 * we can provide the correct devname to the caller. */
924 iface = NULL;
925 for (j = 0; j < port->n_ifaces; j++) {
926 struct iface *candidate = port->ifaces[j];
927 uint8_t candidate_ea[ETH_ADDR_LEN];
928 if (!netdev_get_etheraddr(candidate->netdev, candidate_ea)
929 && eth_addr_equals(iface_ea, candidate_ea)) {
930 iface = candidate;
931 }
932 }
933 } else {
934 /* Choose the interface whose MAC address will represent the port.
935 * The Linux kernel bonding code always chooses the MAC address of
936 * the first slave added to a bond, and the Fedora networking
937 * scripts always add slaves to a bond in alphabetical order, so
938 * for compatibility we choose the interface with the name that is
939 * first in alphabetical order. */
940 iface = port->ifaces[0];
941 for (j = 1; j < port->n_ifaces; j++) {
942 struct iface *candidate = port->ifaces[j];
943 if (strcmp(candidate->name, iface->name) < 0) {
944 iface = candidate;
945 }
946 }
947
948 /* The local port doesn't count (since we're trying to choose its
949 * MAC address anyway). */
950 if (iface->dp_ifidx == ODPP_LOCAL) {
951 continue;
952 }
953
954 /* Grab MAC. */
955 error = netdev_get_etheraddr(iface->netdev, iface_ea);
956 if (error) {
957 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
958 VLOG_ERR_RL(&rl, "failed to obtain Ethernet address of %s: %s",
959 iface->name, strerror(error));
960 continue;
961 }
962 }
963
964 /* Compare against our current choice. */
965 if (!eth_addr_is_multicast(iface_ea) &&
966 !eth_addr_is_local(iface_ea) &&
967 !eth_addr_is_reserved(iface_ea) &&
968 !eth_addr_is_zero(iface_ea) &&
969 memcmp(iface_ea, ea, ETH_ADDR_LEN) < 0)
970 {
971 memcpy(ea, iface_ea, ETH_ADDR_LEN);
972 *hw_addr_iface = iface;
973 }
974 }
975 if (eth_addr_is_multicast(ea)) {
976 memcpy(ea, br->default_ea, ETH_ADDR_LEN);
977 *hw_addr_iface = NULL;
978 VLOG_WARN("bridge %s: using default bridge Ethernet "
979 "address "ETH_ADDR_FMT, br->name, ETH_ADDR_ARGS(ea));
980 } else {
981 VLOG_DBG("bridge %s: using bridge Ethernet address "ETH_ADDR_FMT,
982 br->name, ETH_ADDR_ARGS(ea));
983 }
984 }
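
/* Illustrative summary (not in the original source): the order of preference
 * above is (1) a valid unicast "hwaddr" from other_config, (2) the
 * numerically smallest usable MAC among the bridge's ports (mirror output
 * ports and the local port's own MAC do not participate, and for a bond the
 * alphabetically first interface is preferred), and (3) the randomly
 * generated 'default_ea' chosen when the bridge was created. */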
985
986 /* Chooses and returns the datapath ID for bridge 'br' given that the bridge
987 * Ethernet address is 'bridge_ea'. If 'bridge_ea' is the Ethernet address of
988 * an interface on 'br', then that interface must be passed in as
989 * 'hw_addr_iface'; if 'bridge_ea' was derived some other way, then
990 * 'hw_addr_iface' must be passed in as a null pointer. */
991 static uint64_t
992 bridge_pick_datapath_id(struct bridge *br,
993 const uint8_t bridge_ea[ETH_ADDR_LEN],
994 struct iface *hw_addr_iface)
995 {
996 /*
997 * The procedure for choosing a bridge MAC address will, in the most
998 * ordinary case, also choose a unique MAC that we can use as a datapath
999 * ID. In some special cases, though, multiple bridges will end up with
1000 * the same MAC address. This is OK for the bridges, but it will confuse
1001 * the OpenFlow controller, because each datapath needs a unique datapath
1002 * ID.
1003 *
1004 * Datapath IDs must be unique. It is also very desirable that they be
1005 * stable from one run to the next, so that policy set on a datapath
1006 * "sticks".
1007 */
1008 const char *datapath_id;
1009 uint64_t dpid;
1010
1011 datapath_id = bridge_get_other_config(br->cfg, "datapath-id");
1012 if (datapath_id && dpid_from_string(datapath_id, &dpid)) {
1013 return dpid;
1014 }
1015
1016 if (hw_addr_iface) {
1017 int vlan;
1018 if (!netdev_get_vlan_vid(hw_addr_iface->netdev, &vlan)) {
1019 /*
1020 * A bridge whose MAC address is taken from a VLAN network device
1021 * (that is, a network device created with vconfig(8) or similar
1022 * tool) will have the same MAC address as a bridge on the VLAN
1023 * device's physical network device.
1024 *
1025 * Handle this case by hashing the physical network device MAC
1026 * along with the VLAN identifier.
1027 */
1028 uint8_t buf[ETH_ADDR_LEN + 2];
1029 memcpy(buf, bridge_ea, ETH_ADDR_LEN);
1030 buf[ETH_ADDR_LEN] = vlan >> 8;
1031 buf[ETH_ADDR_LEN + 1] = vlan;
1032 return dpid_from_hash(buf, sizeof buf);
1033 } else {
1034 /*
1035 * Assume that this bridge's MAC address is unique, since it
1036 * doesn't fit any of the cases we handle specially.
1037 */
1038 }
1039 } else {
1040 /*
1041 * A purely internal bridge, that is, one that has no non-virtual
1042 * network devices on it at all, is more difficult because it has no
1043 * natural unique identifier at all.
1044 *
1045 * When the host is a XenServer, we handle this case by hashing the
1046 * host's UUID with the name of the bridge. Names of bridges are
1047 * persistent across XenServer reboots, although they can be reused if
1048 * an internal network is destroyed and then a new one is later
1049 * created, so this is fairly effective.
1050 *
1051 * When the host is not a XenServer, we punt by using a random MAC
1052 * address on each run.
1053 */
1054 const char *host_uuid = xenserver_get_host_uuid();
1055 if (host_uuid) {
1056 char *combined = xasprintf("%s,%s", host_uuid, br->name);
1057 dpid = dpid_from_hash(combined, strlen(combined));
1058 free(combined);
1059 return dpid;
1060 }
1061 }
1062
1063 return eth_addr_to_uint64(bridge_ea);
1064 }
1065
1066 static uint64_t
1067 dpid_from_hash(const void *data, size_t n)
1068 {
1069 uint8_t hash[SHA1_DIGEST_SIZE];
1070
1071 BUILD_ASSERT_DECL(sizeof hash >= ETH_ADDR_LEN);
1072 sha1_bytes(data, n, hash);
1073 eth_addr_mark_random(hash);
1074 return eth_addr_to_uint64(hash);
1075 }
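
/* Example (illustrative, not part of the original code): a purely internal
 * bridge "xenbr0" on a XenServer host whose UUID is (say) "feed...beef" ends
 * up with
 *
 *     dpid = dpid_from_hash("feed...beef,xenbr0", strlen("feed...beef,xenbr0"));
 *
 * which stays stable across reboots as long as the host UUID and bridge name
 * do not change; the SHA-1 digest is reduced to an Ethernet-sized value and
 * widened to 64 bits by eth_addr_to_uint64(). */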
1076
1077 int
1078 bridge_run(void)
1079 {
1080 struct bridge *br, *next;
1081 int retval;
1082
1083 retval = 0;
1084 LIST_FOR_EACH_SAFE (br, next, struct bridge, node, &all_bridges) {
1085 int error = bridge_run_one(br);
1086 if (error) {
1087 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
1088 VLOG_ERR_RL(&rl, "bridge %s: datapath was destroyed externally, "
1089 "forcing reconfiguration", br->name);
1090 if (!retval) {
1091 retval = error;
1092 }
1093 }
1094 }
1095 return retval;
1096 }
1097
1098 void
1099 bridge_wait(void)
1100 {
1101 struct bridge *br;
1102
1103 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
1104 ofproto_wait(br->ofproto);
1105 if (ofproto_has_controller(br->ofproto)) {
1106 continue;
1107 }
1108
1109 mac_learning_wait(br->ml);
1110 bond_wait(br);
1111 }
1112 }
1113
1114 /* Forces 'br' to revalidate all of its flows. This is appropriate when 'br''s
1115 * configuration changes. */
1116 static void
1117 bridge_flush(struct bridge *br)
1118 {
1119 COVERAGE_INC(bridge_flush);
1120 br->flush = true;
1121 mac_learning_flush(br->ml);
1122 }
1123
1124 /* Returns the 'br' interface for the ODPP_LOCAL port, or null if 'br' has no
1125 * such interface. */
1126 static struct iface *
1127 bridge_get_local_iface(struct bridge *br)
1128 {
1129 size_t i, j;
1130
1131 for (i = 0; i < br->n_ports; i++) {
1132 struct port *port = br->ports[i];
1133 for (j = 0; j < port->n_ifaces; j++) {
1134 struct iface *iface = port->ifaces[j];
1135 if (iface->dp_ifidx == ODPP_LOCAL) {
1136 return iface;
1137 }
1138 }
1139 }
1140
1141 return NULL;
1142 }
1143 \f
1144 /* Bridge unixctl user interface functions. */
1145 static void
1146 bridge_unixctl_fdb_show(struct unixctl_conn *conn,
1147 const char *args, void *aux OVS_UNUSED)
1148 {
1149 struct ds ds = DS_EMPTY_INITIALIZER;
1150 const struct bridge *br;
1151 const struct mac_entry *e;
1152
1153 br = bridge_lookup(args);
1154 if (!br) {
1155 unixctl_command_reply(conn, 501, "no such bridge");
1156 return;
1157 }
1158
1159 ds_put_cstr(&ds, " port VLAN MAC Age\n");
1160 LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
1161 if (e->port < 0 || e->port >= br->n_ports) {
1162 continue;
1163 }
1164 ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
1165 br->ports[e->port]->ifaces[0]->dp_ifidx,
1166 e->vlan, ETH_ADDR_ARGS(e->mac), mac_entry_age(e));
1167 }
1168 unixctl_command_reply(conn, 200, ds_cstr(&ds));
1169 ds_destroy(&ds);
1170 }
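
/* Illustrative usage (assumed, not part of the original source): this handler
 * backs the "fdb/show" command registered in bridge_init(), so e.g.
 *
 *     ovs-appctl fdb/show br0
 *
 * prints the MAC learning table of bridge "br0", one "port VLAN MAC Age" line
 * per learned entry. */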
1171 \f
1172 /* Bridge reconfiguration functions. */
1173 static struct bridge *
1174 bridge_create(const struct ovsrec_bridge *br_cfg)
1175 {
1176 struct bridge *br;
1177 int error;
1178
1179 assert(!bridge_lookup(br_cfg->name));
1180 br = xzalloc(sizeof *br);
1181
1182 error = dpif_create_and_open(br_cfg->name, br_cfg->datapath_type,
1183 &br->dpif);
1184 if (error) {
1185 free(br);
1186 return NULL;
1187 }
1188 dpif_flow_flush(br->dpif);
1189
1190 error = ofproto_create(br_cfg->name, br_cfg->datapath_type, &bridge_ofhooks,
1191 br, &br->ofproto);
1192 if (error) {
1193 VLOG_ERR("failed to create switch %s: %s", br_cfg->name,
1194 strerror(error));
1195 dpif_delete(br->dpif);
1196 dpif_close(br->dpif);
1197 free(br);
1198 return NULL;
1199 }
1200
1201 br->name = xstrdup(br_cfg->name);
1202 br->cfg = br_cfg;
1203 br->ml = mac_learning_create();
1204 br->sent_config_request = false;
1205 eth_addr_nicira_random(br->default_ea);
1206
1207 port_array_init(&br->ifaces);
1208
1209 br->flush = false;
1210
1211 list_push_back(&all_bridges, &br->node);
1212
1213 VLOG_INFO("created bridge %s on %s", br->name, dpif_name(br->dpif));
1214
1215 return br;
1216 }
1217
1218 static void
1219 bridge_destroy(struct bridge *br)
1220 {
1221 if (br) {
1222 int error;
1223
1224 while (br->n_ports > 0) {
1225 port_destroy(br->ports[br->n_ports - 1]);
1226 }
1227 list_remove(&br->node);
1228 error = dpif_delete(br->dpif);
1229 if (error && error != ENOENT) {
1230 VLOG_ERR("failed to delete %s: %s",
1231 dpif_name(br->dpif), strerror(error));
1232 }
1233 dpif_close(br->dpif);
1234 ofproto_destroy(br->ofproto);
1235 mac_learning_destroy(br->ml);
1236 port_array_destroy(&br->ifaces);
1237 free(br->ports);
1238 free(br->name);
1239 free(br);
1240 }
1241 }
1242
1243 static struct bridge *
1244 bridge_lookup(const char *name)
1245 {
1246 struct bridge *br;
1247
1248 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
1249 if (!strcmp(br->name, name)) {
1250 return br;
1251 }
1252 }
1253 return NULL;
1254 }
1255
1256 bool
1257 bridge_exists(const char *name)
1258 {
1259 return bridge_lookup(name) ? true : false;
1260 }
1261
1262 uint64_t
1263 bridge_get_datapathid(const char *name)
1264 {
1265 struct bridge *br = bridge_lookup(name);
1266 return br ? ofproto_get_datapath_id(br->ofproto) : 0;
1267 }
1268
1269 /* Handle requests for a listing of all flows known by the OpenFlow
1270 * stack, including those normally hidden. */
1271 static void
1272 bridge_unixctl_dump_flows(struct unixctl_conn *conn,
1273 const char *args, void *aux OVS_UNUSED)
1274 {
1275 struct bridge *br;
1276 struct ds results;
1277
1278 br = bridge_lookup(args);
1279 if (!br) {
1280 unixctl_command_reply(conn, 501, "Unknown bridge");
1281 return;
1282 }
1283
1284 ds_init(&results);
1285 ofproto_get_all_flows(br->ofproto, &results);
1286
1287 unixctl_command_reply(conn, 200, ds_cstr(&results));
1288 ds_destroy(&results);
1289 }
1290
1291 static int
1292 bridge_run_one(struct bridge *br)
1293 {
1294 int error;
1295
1296 error = ofproto_run1(br->ofproto);
1297 if (error) {
1298 return error;
1299 }
1300
1301 mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto));
1302 bond_run(br);
1303
1304 error = ofproto_run2(br->ofproto, br->flush);
1305 br->flush = false;
1306
1307 return error;
1308 }
1309
1310 static size_t
1311 bridge_get_controllers(const struct ovsrec_open_vswitch *ovs_cfg,
1312 const struct bridge *br,
1313 struct ovsrec_controller ***controllersp)
1314 {
1315 struct ovsrec_controller **controllers;
1316 size_t n_controllers;
1317
1318 if (br->cfg->n_controller) {
1319 controllers = br->cfg->controller;
1320 n_controllers = br->cfg->n_controller;
1321 } else {
1322 controllers = ovs_cfg->controller;
1323 n_controllers = ovs_cfg->n_controller;
1324 }
1325
1326 if (n_controllers == 1 && !strcmp(controllers[0]->target, "none")) {
1327 controllers = NULL;
1328 n_controllers = 0;
1329 }
1330
1331 if (controllersp) {
1332 *controllersp = controllers;
1333 }
1334 return n_controllers;
1335 }
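
/* Illustrative note (not in the original source): a bridge's own controller
 * list takes precedence over the global Open_vSwitch one, and a list that
 * consists solely of the single target "none" counts as "no controller at
 * all", in which case bridge_reconfigure_remotes() below falls back to
 * installing the standalone OFPP_NORMAL flow. */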
1336
1337 static bool
1338 check_duplicate_ifaces(struct bridge *br, struct iface *iface, void *ifaces_)
1339 {
1340 struct svec *ifaces = ifaces_;
1341 if (!svec_contains(ifaces, iface->name)) {
1342 svec_add(ifaces, iface->name);
1343 svec_sort(ifaces);
1344 return true;
1345 } else {
1346 VLOG_ERR("bridge %s: %s interface is on multiple ports, "
1347 "removing from %s",
1348 br->name, iface->name, iface->port->name);
1349 return false;
1350 }
1351 }
1352
1353 static void
1354 bridge_update_desc(struct bridge *br OVS_UNUSED)
1355 {
1356 #if 0
1357 bool changed = false;
1358 const char *desc;
1359
1360 desc = cfg_get_string(0, "bridge.%s.mfr-desc", br->name);
1361 if (desc != br->mfr_desc) {
1362 free(br->mfr_desc);
1363 if (desc) {
1364 br->mfr_desc = xstrdup(desc);
1365 } else {
1366 br->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
1367 }
1368 changed = true;
1369 }
1370
1371 desc = cfg_get_string(0, "bridge.%s.hw-desc", br->name);
1372 if (desc != br->hw_desc) {
1373 free(br->hw_desc);
1374 if (desc) {
1375 br->hw_desc = xstrdup(desc);
1376 } else {
1377 br->hw_desc = xstrdup(DEFAULT_HW_DESC);
1378 }
1379 changed = true;
1380 }
1381
1382 desc = cfg_get_string(0, "bridge.%s.sw-desc", br->name);
1383 if (desc != br->sw_desc) {
1384 free(br->sw_desc);
1385 if (desc) {
1386 br->sw_desc = xstrdup(desc);
1387 } else {
1388 br->sw_desc = xstrdup(DEFAULT_SW_DESC);
1389 }
1390 changed = true;
1391 }
1392
1393 desc = cfg_get_string(0, "bridge.%s.serial-desc", br->name);
1394 if (desc != br->serial_desc) {
1395 free(br->serial_desc);
1396 if (desc) {
1397 br->serial_desc = xstrdup(desc);
1398 } else {
1399 br->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
1400 }
1401 changed = true;
1402 }
1403
1404 desc = cfg_get_string(0, "bridge.%s.dp-desc", br->name);
1405 if (desc != br->dp_desc) {
1406 free(br->dp_desc);
1407 if (desc) {
1408 br->dp_desc = xstrdup(desc);
1409 } else {
1410 br->dp_desc = xstrdup(DEFAULT_DP_DESC);
1411 }
1412 changed = true;
1413 }
1414
1415 if (changed) {
1416 ofproto_set_desc(br->ofproto, br->mfr_desc, br->hw_desc,
1417 br->sw_desc, br->serial_desc, br->dp_desc);
1418 }
1419 #endif
1420 }
1421
1422 static void
1423 bridge_reconfigure_one(const struct ovsrec_open_vswitch *ovs_cfg,
1424 struct bridge *br)
1425 {
1426 struct shash old_ports, new_ports;
1427 struct svec ifaces;
1428 struct svec listeners, old_listeners;
1429 struct svec snoops, old_snoops;
1430 struct shash_node *node;
1431 size_t i;
1432
1433 /* Collect old ports. */
1434 shash_init(&old_ports);
1435 for (i = 0; i < br->n_ports; i++) {
1436 shash_add(&old_ports, br->ports[i]->name, br->ports[i]);
1437 }
1438
1439 /* Collect new ports. */
1440 shash_init(&new_ports);
1441 for (i = 0; i < br->cfg->n_ports; i++) {
1442 const char *name = br->cfg->ports[i]->name;
1443 if (!shash_add_once(&new_ports, name, br->cfg->ports[i])) {
1444 VLOG_WARN("bridge %s: %s specified twice as bridge port",
1445 br->name, name);
1446 }
1447 }
1448
1449 /* If we have a controller, then we need a local port. Complain if the
1450 * user didn't specify one.
1451 *
1452 * XXX perhaps we should synthesize a port ourselves in this case. */
1453 if (bridge_get_controllers(ovs_cfg, br, NULL)) {
1454 char local_name[IF_NAMESIZE];
1455 int error;
1456
1457 error = dpif_port_get_name(br->dpif, ODPP_LOCAL,
1458 local_name, sizeof local_name);
1459 if (!error && !shash_find(&new_ports, local_name)) {
1460 VLOG_WARN("bridge %s: controller specified but no local port "
1461 "(port named %s) defined",
1462 br->name, local_name);
1463 }
1464 }
1465
1466 /* Get rid of deleted ports and add new ports. */
1467 SHASH_FOR_EACH (node, &old_ports) {
1468 if (!shash_find(&new_ports, node->name)) {
1469 port_destroy(node->data);
1470 }
1471 }
1472 SHASH_FOR_EACH (node, &new_ports) {
1473 struct port *port = shash_find_data(&old_ports, node->name);
1474 if (!port) {
1475 port = port_create(br, node->name);
1476 }
1477
1478 port_reconfigure(port, node->data);
1479 if (!port->n_ifaces) {
1480 VLOG_WARN("bridge %s: port %s has no interfaces, dropping",
1481 br->name, port->name);
1482 port_destroy(port);
1483 }
1484 }
1485 shash_destroy(&old_ports);
1486 shash_destroy(&new_ports);
1487
1488 /* Check and delete duplicate interfaces. */
1489 svec_init(&ifaces);
1490 iterate_and_prune_ifaces(br, check_duplicate_ifaces, &ifaces);
1491 svec_destroy(&ifaces);
1492
1493 /* Delete all flows if we're switching from connected to standalone or vice
1494 * versa. (XXX Should we delete all flows if we are switching from one
1495 * controller to another?) */
1496
1497 #if 0
1498 /* Configure OpenFlow management listeners. */
1499 svec_init(&listeners);
1500 cfg_get_all_strings(&listeners, "bridge.%s.openflow.listeners", br->name);
1501 if (!listeners.n) {
1502 svec_add_nocopy(&listeners, xasprintf("punix:%s/%s.mgmt",
1503 ovs_rundir, br->name));
1504 } else if (listeners.n == 1 && !strcmp(listeners.names[0], "none")) {
1505 svec_clear(&listeners);
1506 }
1507 svec_sort_unique(&listeners);
1508
1509 svec_init(&old_listeners);
1510 ofproto_get_listeners(br->ofproto, &old_listeners);
1511 svec_sort_unique(&old_listeners);
1512
1513 if (!svec_equal(&listeners, &old_listeners)) {
1514 ofproto_set_listeners(br->ofproto, &listeners);
1515 }
1516 svec_destroy(&listeners);
1517 svec_destroy(&old_listeners);
1518
1519 /* Configure OpenFlow controller connection snooping. */
1520 svec_init(&snoops);
1521 cfg_get_all_strings(&snoops, "bridge.%s.openflow.snoops", br->name);
1522 if (!snoops.n) {
1523 svec_add_nocopy(&snoops, xasprintf("punix:%s/%s.snoop",
1524 ovs_rundir, br->name));
1525 } else if (snoops.n == 1 && !strcmp(snoops.names[0], "none")) {
1526 svec_clear(&snoops);
1527 }
1528 svec_sort_unique(&snoops);
1529
1530 svec_init(&old_snoops);
1531 ofproto_get_snoops(br->ofproto, &old_snoops);
1532 svec_sort_unique(&old_snoops);
1533
1534 if (!svec_equal(&snoops, &old_snoops)) {
1535 ofproto_set_snoops(br->ofproto, &snoops);
1536 }
1537 svec_destroy(&snoops);
1538 svec_destroy(&old_snoops);
1539 #else
1540 /* Default listener. */
1541 svec_init(&listeners);
1542 svec_add_nocopy(&listeners, xasprintf("punix:%s/%s.mgmt",
1543 ovs_rundir, br->name));
1544 svec_init(&old_listeners);
1545 ofproto_get_listeners(br->ofproto, &old_listeners);
1546 if (!svec_equal(&listeners, &old_listeners)) {
1547 ofproto_set_listeners(br->ofproto, &listeners);
1548 }
1549 svec_destroy(&listeners);
1550 svec_destroy(&old_listeners);
1551
1552 /* Default snoop. */
1553 svec_init(&snoops);
1554 svec_add_nocopy(&snoops, xasprintf("punix:%s/%s.snoop",
1555 ovs_rundir, br->name));
1556 svec_init(&old_snoops);
1557 ofproto_get_snoops(br->ofproto, &old_snoops);
1558 if (!svec_equal(&snoops, &old_snoops)) {
1559 ofproto_set_snoops(br->ofproto, &snoops);
1560 }
1561 svec_destroy(&snoops);
1562 svec_destroy(&old_snoops);
1563 #endif
1564
1565 mirror_reconfigure(br);
1566
1567 bridge_update_desc(br);
1568 }
1569
1570 static void
1571 bridge_reconfigure_remotes(const struct ovsrec_open_vswitch *ovs_cfg,
1572 struct bridge *br,
1573 const struct sockaddr_in *managers,
1574 size_t n_managers)
1575 {
1576 struct ovsrec_controller **controllers;
1577 size_t n_controllers;
1578
1579 ofproto_set_extra_in_band_remotes(br->ofproto, managers, n_managers);
1580
1581 n_controllers = bridge_get_controllers(ovs_cfg, br, &controllers);
1582 if (ofproto_has_controller(br->ofproto) != (n_controllers != 0)) {
1583 ofproto_flush_flows(br->ofproto);
1584 }
1585
1586 if (!n_controllers) {
1587 union ofp_action action;
1588 flow_t flow;
1589
1590 /* Clear out controllers. */
1591 ofproto_set_controllers(br->ofproto, NULL, 0);
1592
1593 /* Set up a flow that matches every packet and directs them to
1594 * OFPP_NORMAL (which goes to us). */
1595 memset(&action, 0, sizeof action);
1596 action.type = htons(OFPAT_OUTPUT);
1597 action.output.len = htons(sizeof action);
1598 action.output.port = htons(OFPP_NORMAL);
1599 memset(&flow, 0, sizeof flow);
1600 ofproto_add_flow(br->ofproto, &flow, OVSFW_ALL, 0, &action, 1, 0);
1601 } else {
1602 struct ofproto_controller *ocs;
1603 size_t i;
1604
1605 ocs = xmalloc(n_controllers * sizeof *ocs);
1606 for (i = 0; i < n_controllers; i++) {
1607 struct ovsrec_controller *c = controllers[i];
1608 struct ofproto_controller *oc = &ocs[i];
1609
1610 if (strcmp(c->target, "discover")) {
1611 struct iface *local_iface;
1612 struct in_addr ip;
1613
1614 local_iface = bridge_get_local_iface(br);
1615 if (local_iface && c->local_ip
1616 && inet_aton(c->local_ip, &ip)) {
1617 struct netdev *netdev = local_iface->netdev;
1618 struct in_addr mask, gateway;
1619
1620 if (!c->local_netmask
1621 || !inet_aton(c->local_netmask, &mask)) {
1622 mask.s_addr = 0;
1623 }
1624 if (!c->local_gateway
1625 || !inet_aton(c->local_gateway, &gateway)) {
1626 gateway.s_addr = 0;
1627 }
1628
1629 netdev_turn_flags_on(netdev, NETDEV_UP, true);
1630 if (!mask.s_addr) {
1631 mask.s_addr = guess_netmask(ip.s_addr);
1632 }
1633 if (!netdev_set_in4(netdev, ip, mask)) {
1634 VLOG_INFO("bridge %s: configured IP address "IP_FMT", "
1635 "netmask "IP_FMT,
1636 br->name, IP_ARGS(&ip.s_addr),
1637 IP_ARGS(&mask.s_addr));
1638 }
1639
1640 if (gateway.s_addr) {
1641 if (!netdev_add_router(netdev, gateway)) {
1642 VLOG_INFO("bridge %s: configured gateway "IP_FMT,
1643 br->name, IP_ARGS(&gateway.s_addr));
1644 }
1645 }
1646 }
1647 }
1648
1649 oc->target = c->target;
1650 oc->max_backoff = c->max_backoff ? *c->max_backoff / 1000 : 8;
1651 oc->probe_interval = (c->inactivity_probe
1652 ? *c->inactivity_probe / 1000 : 5);
1653 oc->fail = (!c->fail_mode
1654 || !strcmp(c->fail_mode, "standalone")
1655 || !strcmp(c->fail_mode, "open")
1656 ? OFPROTO_FAIL_STANDALONE
1657 : OFPROTO_FAIL_SECURE);
1658 oc->band = (!c->connection_mode
1659 || !strcmp(c->connection_mode, "in-band")
1660 ? OFPROTO_IN_BAND
1661 : OFPROTO_OUT_OF_BAND);
1662 oc->accept_re = c->discover_accept_regex;
1663 oc->update_resolv_conf = c->discover_update_resolv_conf;
1664 oc->rate_limit = (c->controller_rate_limit
1665 ? *c->controller_rate_limit : 0);
1666 oc->burst_limit = (c->controller_burst_limit
1667 ? *c->controller_burst_limit : 0);
1668 }
1669 ofproto_set_controllers(br->ofproto, ocs, n_controllers);
1670 free(ocs);
1671 }
1672 }
1673
1674 static void
1675 bridge_get_all_ifaces(const struct bridge *br, struct shash *ifaces)
1676 {
1677 size_t i, j;
1678
1679 shash_init(ifaces);
1680 for (i = 0; i < br->n_ports; i++) {
1681 struct port *port = br->ports[i];
1682 for (j = 0; j < port->n_ifaces; j++) {
1683 struct iface *iface = port->ifaces[j];
1684 shash_add_once(ifaces, iface->name, iface);
1685 }
1686 if (port->n_ifaces > 1 && port->cfg->bond_fake_iface) {
1687 shash_add_once(ifaces, port->name, NULL);
1688 }
1689 }
1690 }
1691
1692 /* For robustness, in case the administrator moves around datapath ports behind
1693 * our back, we re-check all the datapath port numbers here.
1694 *
1695 * This function will set the 'dp_ifidx' members of interfaces that have
1696 * disappeared to -1, so only call this function from a context where those
1697 * 'struct iface's will be removed from the bridge. Otherwise, the -1
1698 * 'dp_ifidx'es will cause trouble later when we try to send them to the
1699 * datapath, which doesn't support UINT16_MAX+1 ports. */
1700 static void
1701 bridge_fetch_dp_ifaces(struct bridge *br)
1702 {
1703 struct odp_port *dpif_ports;
1704 size_t n_dpif_ports;
1705 size_t i, j;
1706
1707 /* Reset all interface numbers. */
1708 for (i = 0; i < br->n_ports; i++) {
1709 struct port *port = br->ports[i];
1710 for (j = 0; j < port->n_ifaces; j++) {
1711 struct iface *iface = port->ifaces[j];
1712 iface->dp_ifidx = -1;
1713 }
1714 }
1715 port_array_clear(&br->ifaces);
1716
1717 dpif_port_list(br->dpif, &dpif_ports, &n_dpif_ports);
1718 for (i = 0; i < n_dpif_ports; i++) {
1719 struct odp_port *p = &dpif_ports[i];
1720 struct iface *iface = iface_lookup(br, p->devname);
1721 if (iface) {
1722 if (iface->dp_ifidx >= 0) {
1723 VLOG_WARN("%s reported interface %s twice",
1724 dpif_name(br->dpif), p->devname);
1725 } else if (iface_from_dp_ifidx(br, p->port)) {
1726 VLOG_WARN("%s reported interface %"PRIu16" twice",
1727 dpif_name(br->dpif), p->port);
1728 } else {
1729 port_array_set(&br->ifaces, p->port, iface);
1730 iface->dp_ifidx = p->port;
1731 }
1732
1733 if (iface->cfg) {
1734 int64_t ofport = (iface->dp_ifidx >= 0
1735 ? odp_port_to_ofp_port(iface->dp_ifidx)
1736 : -1);
1737 ovsrec_interface_set_ofport(iface->cfg, &ofport, 1);
1738 }
1739 }
1740 }
1741 free(dpif_ports);
1742 }
1743 \f
1744 /* Bridge packet processing functions. */
1745
1746 static int
1747 bond_hash(const uint8_t mac[ETH_ADDR_LEN])
1748 {
1749 return hash_bytes(mac, ETH_ADDR_LEN, 0) & BOND_MASK;
1750 }
1751
1752 static struct bond_entry *
1753 lookup_bond_entry(const struct port *port, const uint8_t mac[ETH_ADDR_LEN])
1754 {
1755 return &port->bond_hash[bond_hash(mac)];
1756 }
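
/* Illustrative sketch (assumed; see choose_output_iface() below for the real
 * logic, including bounds and error handling): picking the output slave for a
 * packet's source MAC amounts to
 *
 *     struct bond_entry *e = lookup_bond_entry(port, dl_src);
 *     if (e->iface_idx < 0 || !port->ifaces[e->iface_idx]->enabled) {
 *         e->iface_idx = bond_choose_iface(port);
 *     }
 *     iface = port->ifaces[e->iface_idx];
 *
 * so each entry caches its slave assignment until it becomes invalid. */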
1757
1758 static int
1759 bond_choose_iface(const struct port *port)
1760 {
1761 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1762 size_t i, best_down_slave = -1;
1763 long long next_delay_expiration = LLONG_MAX;
1764
1765 for (i = 0; i < port->n_ifaces; i++) {
1766 struct iface *iface = port->ifaces[i];
1767
1768 if (iface->enabled) {
1769 return i;
1770 } else if (iface->delay_expires < next_delay_expiration) {
1771 best_down_slave = i;
1772 next_delay_expiration = iface->delay_expires;
1773 }
1774 }
1775
1776 if (best_down_slave != -1) {
1777 struct iface *iface = port->ifaces[best_down_slave];
1778
1779 VLOG_INFO_RL(&rl, "interface %s: skipping remaining %lli ms updelay "
1780 "since no other interface is up", iface->name,
1781 iface->delay_expires - time_msec());
1782 bond_enable_slave(iface, true);
1783 }
1784
1785 return best_down_slave;
1786 }
1787
1788 static bool
1789 choose_output_iface(const struct port *port, const uint8_t *dl_src,
1790 uint16_t *dp_ifidx, tag_type *tags)
1791 {
1792 struct iface *iface;
1793
1794 assert(port->n_ifaces);
1795 if (port->n_ifaces == 1) {
1796 iface = port->ifaces[0];
1797 } else {
1798 struct bond_entry *e = lookup_bond_entry(port, dl_src);
1799 if (e->iface_idx < 0 || e->iface_idx >= port->n_ifaces
1800 || !port->ifaces[e->iface_idx]->enabled) {
1801 /* XXX select interface properly. The current interface selection
1802 * is only good for testing the rebalancing code. */
1803 e->iface_idx = bond_choose_iface(port);
1804 if (e->iface_idx < 0) {
1805 *tags |= port->no_ifaces_tag;
1806 return false;
1807 }
1808 e->iface_tag = tag_create_random();
1809 ((struct port *) port)->bond_compat_is_stale = true;
1810 }
1811 *tags |= e->iface_tag;
1812 iface = port->ifaces[e->iface_idx];
1813 }
1814 *dp_ifidx = iface->dp_ifidx;
1815 *tags |= iface->tag; /* Currently only used for bonding. */
1816 return true;
1817 }
1818
1819 static void
1820 bond_link_status_update(struct iface *iface, bool carrier)
1821 {
1822 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1823 struct port *port = iface->port;
1824
1825 if ((carrier == iface->enabled) == (iface->delay_expires == LLONG_MAX)) {
1826 /* Nothing to do. */
1827 return;
1828 }
1829 VLOG_INFO_RL(&rl, "interface %s: carrier %s",
1830 iface->name, carrier ? "detected" : "dropped");
1831 if (carrier == iface->enabled) {
1832 iface->delay_expires = LLONG_MAX;
1833 VLOG_INFO_RL(&rl, "interface %s: will not be %s",
1834 iface->name, carrier ? "disabled" : "enabled");
1835 } else if (carrier && port->active_iface < 0) {
1836 bond_enable_slave(iface, true);
1837 if (port->updelay) {
1838 VLOG_INFO_RL(&rl, "interface %s: skipping %d ms updelay since no "
1839 "other interface is up", iface->name, port->updelay);
1840 }
1841 } else {
1842 int delay = carrier ? port->updelay : port->downdelay;
1843 iface->delay_expires = time_msec() + delay;
1844 if (delay) {
1845 VLOG_INFO_RL(&rl,
1846 "interface %s: will be %s if it stays %s for %d ms",
1847 iface->name,
1848 carrier ? "enabled" : "disabled",
1849 carrier ? "up" : "down",
1850 delay);
1851 }
1852 }
1853 }
1854
1855 static void
1856 bond_choose_active_iface(struct port *port)
1857 {
1858 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1859
1860 port->active_iface = bond_choose_iface(port);
1861 port->active_iface_tag = tag_create_random();
1862 if (port->active_iface >= 0) {
1863 VLOG_INFO_RL(&rl, "port %s: active interface is now %s",
1864 port->name, port->ifaces[port->active_iface]->name);
1865 } else {
1866 VLOG_WARN_RL(&rl, "port %s: all ports disabled, no active interface",
1867 port->name);
1868 }
1869 }
1870
1871 static void
1872 bond_enable_slave(struct iface *iface, bool enable)
1873 {
1874 struct port *port = iface->port;
1875 struct bridge *br = port->bridge;
1876
1877 /* This acts as a recursion check. If the act of disabling a slave
1878 * causes a different slave to be enabled, the flag will allow us to
1879 * skip redundant work when we reenter this function. It must be
1880 * cleared on exit to keep things safe with multiple bonds. */
1881 static bool moving_active_iface = false;
1882
1883 iface->delay_expires = LLONG_MAX;
1884 if (enable == iface->enabled) {
1885 return;
1886 }
1887
1888 iface->enabled = enable;
1889 if (!iface->enabled) {
1890 VLOG_WARN("interface %s: disabled", iface->name);
1891 ofproto_revalidate(br->ofproto, iface->tag);
1892 if (iface->port_ifidx == port->active_iface) {
1893 ofproto_revalidate(br->ofproto,
1894 port->active_iface_tag);
1895
1896             /* Disabling a slave can cause another slave to be enabled
1897              * immediately if the bond would otherwise be left with no active
1898              * slave while one slave is still waiting out its updelay. In that
1899              * case we can skip most of the work for the newly enabled slave:
1900              * there was never a period without an active slave, so the work is
1901              * redundant with the disabling path. */
1902 moving_active_iface = true;
1903 bond_choose_active_iface(port);
1904 }
1905 bond_send_learning_packets(port);
1906 } else {
1907 VLOG_WARN("interface %s: enabled", iface->name);
1908 if (port->active_iface < 0 && !moving_active_iface) {
1909 ofproto_revalidate(br->ofproto, port->no_ifaces_tag);
1910 bond_choose_active_iface(port);
1911 bond_send_learning_packets(port);
1912 }
1913 iface->tag = tag_create_random();
1914 }
1915
1916 moving_active_iface = false;
1917 port->bond_compat_is_stale = true;
1918 }
1919
1920 /* Attempts to make the sum of the bond slaves' statistics appear on the fake
1921 * bond interface. */
1922 static void
1923 bond_update_fake_iface_stats(struct port *port)
1924 {
1925 struct netdev_stats bond_stats;
1926 struct netdev *bond_dev;
1927 size_t i;
1928
1929 memset(&bond_stats, 0, sizeof bond_stats);
1930
1931 for (i = 0; i < port->n_ifaces; i++) {
1932 struct netdev_stats slave_stats;
1933
1934 if (!netdev_get_stats(port->ifaces[i]->netdev, &slave_stats)) {
1935 bond_stats.rx_packets += slave_stats.rx_packets;
1936 bond_stats.rx_bytes += slave_stats.rx_bytes;
1937 bond_stats.tx_packets += slave_stats.tx_packets;
1938 bond_stats.tx_bytes += slave_stats.tx_bytes;
1939 }
1940 }
1941
1942 if (!netdev_open_default(port->name, &bond_dev)) {
1943 netdev_set_stats(bond_dev, &bond_stats);
1944 netdev_close(bond_dev);
1945 }
1946 }
1947
1948 static void
1949 bond_run(struct bridge *br)
1950 {
1951 size_t i, j;
1952
1953 for (i = 0; i < br->n_ports; i++) {
1954 struct port *port = br->ports[i];
1955
1956 if (port->n_ifaces >= 2) {
1957 for (j = 0; j < port->n_ifaces; j++) {
1958 struct iface *iface = port->ifaces[j];
1959 if (time_msec() >= iface->delay_expires) {
1960 bond_enable_slave(iface, !iface->enabled);
1961 }
1962 }
1963
1964 if (port->bond_fake_iface
1965 && time_msec() >= port->bond_next_fake_iface_update) {
1966 bond_update_fake_iface_stats(port);
1967 port->bond_next_fake_iface_update = time_msec() + 1000;
1968 }
1969 }
1970
1971 if (port->bond_compat_is_stale) {
1972 port->bond_compat_is_stale = false;
1973 port_update_bond_compat(port);
1974 }
1975 }
1976 }
1977
1978 static void
1979 bond_wait(struct bridge *br)
1980 {
1981 size_t i, j;
1982
1983 for (i = 0; i < br->n_ports; i++) {
1984 struct port *port = br->ports[i];
1985 if (port->n_ifaces < 2) {
1986 continue;
1987 }
1988 for (j = 0; j < port->n_ifaces; j++) {
1989 struct iface *iface = port->ifaces[j];
1990 if (iface->delay_expires != LLONG_MAX) {
1991 poll_timer_wait(iface->delay_expires - time_msec());
1992 }
1993 }
1994 if (port->bond_fake_iface) {
1995 poll_timer_wait(port->bond_next_fake_iface_update - time_msec());
1996 }
1997 }
1998 }
1999
2000 static bool
2001 set_dst(struct dst *p, const flow_t *flow,
2002 const struct port *in_port, const struct port *out_port,
2003 tag_type *tags)
2004 {
2005 p->vlan = (out_port->vlan >= 0 ? OFP_VLAN_NONE
2006 : in_port->vlan >= 0 ? in_port->vlan
2007 : ntohs(flow->dl_vlan));
2008 return choose_output_iface(out_port, flow->dl_src, &p->dp_ifidx, tags);
2009 }
2010
2011 static void
2012 swap_dst(struct dst *p, struct dst *q)
2013 {
2014 struct dst tmp = *p;
2015 *p = *q;
2016 *q = tmp;
2017 }
2018
2019 /* Moves all the dsts with vlan == 'vlan' to the front of the 'n_dsts' entries
2020  * in 'dsts'. (This may help performance by reducing the number of VLAN changes
2021 * that we push to the datapath. We could in fact fully sort the array by
2022 * vlan, but in most cases there are at most two different vlan tags so that's
2023 * possibly overkill.) */
2024 static void
2025 partition_dsts(struct dst *dsts, size_t n_dsts, int vlan)
2026 {
2027 struct dst *first = dsts;
2028 struct dst *last = dsts + n_dsts;
2029
2030 while (first != last) {
2031 /* Invariants:
2032 * - All dsts < first have vlan == 'vlan'.
2033 * - All dsts >= last have vlan != 'vlan'.
2034 * - first < last. */
2035 while (first->vlan == vlan) {
2036 if (++first == last) {
2037 return;
2038 }
2039 }
2040
2041 /* Same invariants, plus one additional:
2042 * - first->vlan != vlan.
2043 */
2044 while (last[-1].vlan != vlan) {
2045 if (--last == first) {
2046 return;
2047 }
2048 }
2049
2050 /* Same invariants, plus one additional:
2051 * - last[-1].vlan == vlan.*/
2052 swap_dst(first++, --last);
2053 }
2054 }
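
/* Worked example (added for clarity; not in the original source): if the vlans
 * in 'dsts' are {10, 20, 10, 30} and 'vlan' is 10, the loop above swaps the
 * second and third entries, leaving {10, 10, 20, 30}: every dst with
 * vlan == 10 is now at the front, and the remaining entries are in no
 * particular order. */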
2055
2056 static int
2057 mirror_mask_ffs(mirror_mask_t mask)
2058 {
2059 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
2060 return ffs(mask);
2061 }
2062
2063 static bool
2064 dst_is_duplicate(const struct dst *dsts, size_t n_dsts,
2065 const struct dst *test)
2066 {
2067 size_t i;
2068 for (i = 0; i < n_dsts; i++) {
2069 if (dsts[i].vlan == test->vlan && dsts[i].dp_ifidx == test->dp_ifidx) {
2070 return true;
2071 }
2072 }
2073 return false;
2074 }
2075
2076 static bool
2077 port_trunks_vlan(const struct port *port, uint16_t vlan)
2078 {
2079 return port->vlan < 0 && bitmap_is_set(port->trunks, vlan);
2080 }
2081
2082 static bool
2083 port_includes_vlan(const struct port *port, uint16_t vlan)
2084 {
2085 return vlan == port->vlan || port_trunks_vlan(port, vlan);
2086 }
2087
2088 static size_t
2089 compose_dsts(const struct bridge *br, const flow_t *flow, uint16_t vlan,
2090 const struct port *in_port, const struct port *out_port,
2091 struct dst dsts[], tag_type *tags, uint16_t *nf_output_iface)
2092 {
2093 mirror_mask_t mirrors = in_port->src_mirrors;
2094 struct dst *dst = dsts;
2095 size_t i;
2096
2097 if (out_port == FLOOD_PORT) {
2098 /* XXX use ODP_FLOOD if no vlans or bonding. */
2099 /* XXX even better, define each VLAN as a datapath port group */
2100 for (i = 0; i < br->n_ports; i++) {
2101 struct port *port = br->ports[i];
2102 if (port != in_port && port_includes_vlan(port, vlan)
2103 && !port->is_mirror_output_port
2104 && set_dst(dst, flow, in_port, port, tags)) {
2105 mirrors |= port->dst_mirrors;
2106 dst++;
2107 }
2108 }
2109 *nf_output_iface = NF_OUT_FLOOD;
2110 } else if (out_port && set_dst(dst, flow, in_port, out_port, tags)) {
2111 *nf_output_iface = dst->dp_ifidx;
2112 mirrors |= out_port->dst_mirrors;
2113 dst++;
2114 }
2115
2116 while (mirrors) {
2117 struct mirror *m = br->mirrors[mirror_mask_ffs(mirrors) - 1];
2118 if (!m->n_vlans || vlan_is_mirrored(m, vlan)) {
2119 if (m->out_port) {
2120 if (set_dst(dst, flow, in_port, m->out_port, tags)
2121 && !dst_is_duplicate(dsts, dst - dsts, dst)) {
2122 dst++;
2123 }
2124 } else {
2125 for (i = 0; i < br->n_ports; i++) {
2126 struct port *port = br->ports[i];
2127 if (port_includes_vlan(port, m->out_vlan)
2128 && set_dst(dst, flow, in_port, port, tags))
2129 {
2130 int flow_vlan;
2131
2132 if (port->vlan < 0) {
2133 dst->vlan = m->out_vlan;
2134 }
2135 if (dst_is_duplicate(dsts, dst - dsts, dst)) {
2136 continue;
2137 }
2138
2139 /* Use the vlan tag on the original flow instead of
2140 * the one passed in the vlan parameter. This ensures
2141 * that we compare the vlan from before any implicit
2142                          * tagging takes place. This is necessary because
2143 * dst->vlan is the final vlan, after removing implicit
2144 * tags. */
2145 flow_vlan = ntohs(flow->dl_vlan);
2146 if (flow_vlan == 0) {
2147 flow_vlan = OFP_VLAN_NONE;
2148 }
2149 if (port == in_port && dst->vlan == flow_vlan) {
2150 /* Don't send out input port on same VLAN. */
2151 continue;
2152 }
2153 dst++;
2154 }
2155 }
2156 }
2157 }
2158 mirrors &= mirrors - 1;
2159 }
2160
2161 partition_dsts(dsts, dst - dsts, ntohs(flow->dl_vlan));
2162 return dst - dsts;
2163 }
2164
2165 static void OVS_UNUSED
2166 print_dsts(const struct dst *dsts, size_t n)
2167 {
2168 for (; n--; dsts++) {
2169 printf(">p%"PRIu16, dsts->dp_ifidx);
2170 if (dsts->vlan != OFP_VLAN_NONE) {
2171 printf("v%"PRIu16, dsts->vlan);
2172 }
2173 }
2174 }
2175
2176 static void
2177 compose_actions(struct bridge *br, const flow_t *flow, uint16_t vlan,
2178 const struct port *in_port, const struct port *out_port,
2179 tag_type *tags, struct odp_actions *actions,
2180 uint16_t *nf_output_iface)
2181 {
2182 struct dst dsts[DP_MAX_PORTS * (MAX_MIRRORS + 1)];
2183 size_t n_dsts;
2184 const struct dst *p;
2185 uint16_t cur_vlan;
2186
2187 n_dsts = compose_dsts(br, flow, vlan, in_port, out_port, dsts, tags,
2188 nf_output_iface);
2189
2190 cur_vlan = ntohs(flow->dl_vlan);
2191 for (p = dsts; p < &dsts[n_dsts]; p++) {
2192 union odp_action *a;
2193 if (p->vlan != cur_vlan) {
2194 if (p->vlan == OFP_VLAN_NONE) {
2195 odp_actions_add(actions, ODPAT_STRIP_VLAN);
2196 } else {
2197 a = odp_actions_add(actions, ODPAT_SET_VLAN_VID);
2198 a->vlan_vid.vlan_vid = htons(p->vlan);
2199 }
2200 cur_vlan = p->vlan;
2201 }
2202 a = odp_actions_add(actions, ODPAT_OUTPUT);
2203 a->output.port = p->dp_ifidx;
2204 }
2205 }
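
/* Illustrative sketch (not part of the original source): for a packet that
 * arrived tagged with VLAN 10 and dsts of {vlan 10, port 3} followed by
 * {vlan OFP_VLAN_NONE, port 5}, the loop above emits approximately:
 *
 *     ODPAT_OUTPUT (port 3), ODPAT_STRIP_VLAN, ODPAT_OUTPUT (port 5)
 *
 * A vlan action is added only when a dst's vlan differs from 'cur_vlan', which
 * is why partition_dsts() groups the dsts that keep the packet's original tag
 * at the front. */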
2206
2207 /* Returns the effective vlan of a packet, taking into account both the
2208 * 802.1Q header and implicitly tagged ports. A value of 0 indicates that
2209  * the packet is untagged and -1 indicates that its VLAN tagging is not
2210  * acceptable on 'in_port' and the packet should be dropped. */
2211 static int flow_get_vlan(struct bridge *br, const flow_t *flow,
2212 struct port *in_port, bool have_packet)
2213 {
2214 /* Note that dl_vlan of 0 and of OFP_VLAN_NONE both mean that the packet
2215 * belongs to VLAN 0, so we should treat both cases identically. (In the
2216 * former case, the packet has an 802.1Q header that specifies VLAN 0,
2217 * presumably to allow a priority to be specified. In the latter case, the
2218 * packet does not have any 802.1Q header.) */
2219 int vlan = ntohs(flow->dl_vlan);
2220 if (vlan == OFP_VLAN_NONE) {
2221 vlan = 0;
2222 }
2223 if (in_port->vlan >= 0) {
2224 if (vlan) {
2225 /* XXX support double tagging? */
2226 if (have_packet) {
2227 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2228 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
2229 "packet received on port %s configured with "
2230 "implicit VLAN %"PRIu16,
2231 br->name, ntohs(flow->dl_vlan),
2232 in_port->name, in_port->vlan);
2233 }
2234 return -1;
2235 }
2236 vlan = in_port->vlan;
2237 } else {
2238 if (!port_includes_vlan(in_port, vlan)) {
2239 if (have_packet) {
2240 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2241 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %d tagged "
2242 "packet received on port %s not configured for "
2243 "trunking VLAN %d",
2244 br->name, vlan, in_port->name, vlan);
2245 }
2246 return -1;
2247 }
2248 }
2249
2250 return vlan;
2251 }
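
/* Examples of flow_get_vlan()'s behavior (added commentary, not in the
 * original source):
 *
 *   - Untagged packet on an access port with implicit VLAN 7  -> returns 7.
 *   - Packet tagged with VLAN 9 on that same access port      -> returns -1
 *     (dropped; double tagging is not supported).
 *   - Packet tagged with VLAN 9 on a trunk that carries 9     -> returns 9.
 *   - Packet tagged with VLAN 9 on a trunk that excludes 9    -> returns -1.
 */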
2252
2253 static void
2254 update_learning_table(struct bridge *br, const flow_t *flow, int vlan,
2255 struct port *in_port)
2256 {
2257 tag_type rev_tag = mac_learning_learn(br->ml, flow->dl_src,
2258 vlan, in_port->port_idx);
2259 if (rev_tag) {
2260 /* The log messages here could actually be useful in debugging,
2261 * so keep the rate limit relatively high. */
2262 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30,
2263 300);
2264 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
2265 "on port %s in VLAN %d",
2266 br->name, ETH_ADDR_ARGS(flow->dl_src),
2267 in_port->name, vlan);
2268 ofproto_revalidate(br->ofproto, rev_tag);
2269 }
2270 }
2271
2272 static bool
2273 is_bcast_arp_reply(const flow_t *flow)
2274 {
2275 return (flow->dl_type == htons(ETH_TYPE_ARP)
2276 && flow->nw_proto == ARP_OP_REPLY
2277 && eth_addr_is_broadcast(flow->dl_dst));
2278 }
2279
2280 /* Determines whether packets in 'flow' within 'br' should be forwarded or
2281 * dropped. Returns true if they may be forwarded, false if they should be
2282 * dropped.
2283 *
2284 * If 'have_packet' is true, it indicates that the caller is processing a
2285 * received packet. If 'have_packet' is false, then the caller is just
2286 * revalidating an existing flow because configuration has changed. Either
2287 * way, 'have_packet' only affects logging (there is no point in logging errors
2288 * during revalidation).
2289 *
2290 * Sets '*in_portp' to the input port. This will be a null pointer if
2291 * flow->in_port does not designate a known input port (in which case
2292 * is_admissible() returns false).
2293 *
2294 * When returning true, sets '*vlanp' to the effective VLAN of the input
2295 * packet, as returned by flow_get_vlan().
2296 *
2297 * May also add tags to '*tags', although the current implementation only does
2298 * so in one special case.
2299 */
2300 static bool
2301 is_admissible(struct bridge *br, const flow_t *flow, bool have_packet,
2302 tag_type *tags, int *vlanp, struct port **in_portp)
2303 {
2304 struct iface *in_iface;
2305 struct port *in_port;
2306 int vlan;
2307
2308 /* Find the interface and port structure for the received packet. */
2309 in_iface = iface_from_dp_ifidx(br, flow->in_port);
2310 if (!in_iface) {
2311 /* No interface? Something fishy... */
2312 if (have_packet) {
2313 /* Odd. A few possible reasons here:
2314 *
2315 * - We deleted an interface but there are still a few packets
2316 * queued up from it.
2317 *
2318 * - Someone externally added an interface (e.g. with "ovs-dpctl
2319 * add-if") that we don't know about.
2320 *
2321 * - Packet arrived on the local port but the local port is not
2322 * one of our bridge ports.
2323 */
2324 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2325
2326 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
2327 "interface %"PRIu16, br->name, flow->in_port);
2328 }
2329
2330 *in_portp = NULL;
2331 return false;
2332 }
2333 *in_portp = in_port = in_iface->port;
2334 *vlanp = vlan = flow_get_vlan(br, flow, in_port, have_packet);
2335 if (vlan < 0) {
2336 return false;
2337 }
2338
2339 /* Drop frames for reserved multicast addresses. */
2340 if (eth_addr_is_reserved(flow->dl_dst)) {
2341 return false;
2342 }
2343
2344 /* Drop frames on ports reserved for mirroring. */
2345 if (in_port->is_mirror_output_port) {
2346 if (have_packet) {
2347 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2348 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
2349 "%s, which is reserved exclusively for mirroring",
2350 br->name, in_port->name);
2351 }
2352 return false;
2353 }
2354
2355 /* Packets received on bonds need special attention to avoid duplicates. */
2356 if (in_port->n_ifaces > 1) {
2357 int src_idx;
2358
2359 if (eth_addr_is_multicast(flow->dl_dst)) {
2360 *tags |= in_port->active_iface_tag;
2361 if (in_port->active_iface != in_iface->port_ifidx) {
2362 /* Drop all multicast packets on inactive slaves. */
2363 return false;
2364 }
2365 }
2366
2367 /* Drop all packets for which we have learned a different input
2368 * port, because we probably sent the packet on one slave and got
2369 * it back on the other. Broadcast ARP replies are an exception
2370 * to this rule: the host has moved to another switch. */
2371 src_idx = mac_learning_lookup(br->ml, flow->dl_src, vlan);
2372 if (src_idx != -1 && src_idx != in_port->port_idx &&
2373 !is_bcast_arp_reply(flow)) {
2374 return false;
2375 }
2376 }
2377
2378 return true;
2379 }
2380
2381 /* Returns true if the composed actions may be applied to any packet in the
2382  * given 'flow'. Otherwise, the actions should be applied only to 'packet',
2383  * or to no packet at all if 'packet' was NULL. */
2384 static bool
2385 process_flow(struct bridge *br, const flow_t *flow,
2386 const struct ofpbuf *packet, struct odp_actions *actions,
2387 tag_type *tags, uint16_t *nf_output_iface)
2388 {
2389 struct port *in_port;
2390 struct port *out_port;
2391 int vlan;
2392 int out_port_idx;
2393
2394 /* Check whether we should drop packets in this flow. */
2395 if (!is_admissible(br, flow, packet != NULL, tags, &vlan, &in_port)) {
2396 out_port = NULL;
2397 goto done;
2398 }
2399
2400 /* Learn source MAC (but don't try to learn from revalidation). */
2401 if (packet) {
2402 update_learning_table(br, flow, vlan, in_port);
2403 }
2404
2405 /* Determine output port. */
2406 out_port_idx = mac_learning_lookup_tag(br->ml, flow->dl_dst, vlan, tags);
2407 if (out_port_idx >= 0 && out_port_idx < br->n_ports) {
2408 out_port = br->ports[out_port_idx];
2409 } else if (!packet && !eth_addr_is_multicast(flow->dl_dst)) {
2410 /* If we are revalidating but don't have a learning entry then
2411 * eject the flow. Installing a flow that floods packets opens
2412 * up a window of time where we could learn from a packet reflected
2413 * on a bond and blackhole packets before the learning table is
2414 * updated to reflect the correct port. */
2415 return false;
2416 } else {
2417 out_port = FLOOD_PORT;
2418 }
2419
2420 /* Don't send packets out their input ports. */
2421 if (in_port == out_port) {
2422 out_port = NULL;
2423 }
2424
2425 done:
2426 if (in_port) {
2427 compose_actions(br, flow, vlan, in_port, out_port, tags, actions,
2428 nf_output_iface);
2429 }
2430
2431 return true;
2432 }
2433
2434 /* Careful: 'opp' is in host byte order and opp->port_no is an OFP port
2435 * number. */
2436 static void
2437 bridge_port_changed_ofhook_cb(enum ofp_port_reason reason,
2438 const struct ofp_phy_port *opp,
2439 void *br_)
2440 {
2441 struct bridge *br = br_;
2442 struct iface *iface;
2443 struct port *port;
2444
2445 iface = iface_from_dp_ifidx(br, ofp_port_to_odp_port(opp->port_no));
2446 if (!iface) {
2447 return;
2448 }
2449 port = iface->port;
2450
2451 if (reason == OFPPR_DELETE) {
2452 VLOG_WARN("bridge %s: interface %s deleted unexpectedly",
2453 br->name, iface->name);
2454 iface_destroy(iface);
2455 if (!port->n_ifaces) {
2456 VLOG_WARN("bridge %s: port %s has no interfaces, dropping",
2457 br->name, port->name);
2458 port_destroy(port);
2459 }
2460
2461 bridge_flush(br);
2462 } else {
2463 if (port->n_ifaces > 1) {
2464 bool up = !(opp->state & OFPPS_LINK_DOWN);
2465 bond_link_status_update(iface, up);
2466 port_update_bond_compat(port);
2467 }
2468 }
2469 }
2470
2471 static bool
2472 bridge_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
2473 struct odp_actions *actions, tag_type *tags,
2474 uint16_t *nf_output_iface, void *br_)
2475 {
2476 struct bridge *br = br_;
2477
2478 COVERAGE_INC(bridge_process_flow);
2479 return process_flow(br, flow, packet, actions, tags, nf_output_iface);
2480 }
2481
2482 static void
2483 bridge_account_flow_ofhook_cb(const flow_t *flow,
2484 const union odp_action *actions,
2485 size_t n_actions, unsigned long long int n_bytes,
2486 void *br_)
2487 {
2488 struct bridge *br = br_;
2489 const union odp_action *a;
2490 struct port *in_port;
2491 tag_type tags = 0;
2492 int vlan;
2493
2494 /* Feed information from the active flows back into the learning table
2495 * to ensure that table is always in sync with what is actually flowing
2496 * through the datapath. */
2497 if (is_admissible(br, flow, false, &tags, &vlan, &in_port)) {
2498 update_learning_table(br, flow, vlan, in_port);
2499 }
2500
2501 if (!br->has_bonded_ports) {
2502 return;
2503 }
2504
2505 for (a = actions; a < &actions[n_actions]; a++) {
2506 if (a->type == ODPAT_OUTPUT) {
2507 struct port *out_port = port_from_dp_ifidx(br, a->output.port);
2508 if (out_port && out_port->n_ifaces >= 2) {
2509 struct bond_entry *e = lookup_bond_entry(out_port,
2510 flow->dl_src);
2511 e->tx_bytes += n_bytes;
2512 }
2513 }
2514 }
2515 }
2516
2517 static void
2518 bridge_account_checkpoint_ofhook_cb(void *br_)
2519 {
2520 struct bridge *br = br_;
2521 long long int now;
2522 size_t i;
2523
2524 if (!br->has_bonded_ports) {
2525 return;
2526 }
2527
2528 now = time_msec();
2529 for (i = 0; i < br->n_ports; i++) {
2530 struct port *port = br->ports[i];
2531 if (port->n_ifaces > 1 && now >= port->bond_next_rebalance) {
2532 port->bond_next_rebalance = now + port->bond_rebalance_interval;
2533 bond_rebalance_port(port);
2534 }
2535 }
2536 }
2537
2538 static struct ofhooks bridge_ofhooks = {
2539 bridge_port_changed_ofhook_cb,
2540 bridge_normal_ofhook_cb,
2541 bridge_account_flow_ofhook_cb,
2542 bridge_account_checkpoint_ofhook_cb,
2543 };
2544 \f
2545 /* Bonding functions. */
2546
2547 /* Statistics for a single interface on a bonded port, used for load-based
2548 * bond rebalancing. */
2549 struct slave_balance {
2550 struct iface *iface; /* The interface. */
2551 uint64_t tx_bytes; /* Sum of hashes[*]->tx_bytes. */
2552
2553 /* All the "bond_entry"s that are assigned to this interface, in order of
2554 * increasing tx_bytes. */
2555 struct bond_entry **hashes;
2556 size_t n_hashes;
2557 };
2558
2559 /* Sorts pointers to pointers to bond_entries in ascending order by the
2560 * interface to which they are assigned, and within a single interface in
2561 * ascending order of bytes transmitted. */
2562 static int
2563 compare_bond_entries(const void *a_, const void *b_)
2564 {
2565 const struct bond_entry *const *ap = a_;
2566 const struct bond_entry *const *bp = b_;
2567 const struct bond_entry *a = *ap;
2568 const struct bond_entry *b = *bp;
2569 if (a->iface_idx != b->iface_idx) {
2570 return a->iface_idx > b->iface_idx ? 1 : -1;
2571 } else if (a->tx_bytes != b->tx_bytes) {
2572 return a->tx_bytes > b->tx_bytes ? 1 : -1;
2573 } else {
2574 return 0;
2575 }
2576 }
2577
2578 /* Sorts slave_balances so that enabled ports come first, and otherwise in
2579 * *descending* order by number of bytes transmitted. */
2580 static int
2581 compare_slave_balance(const void *a_, const void *b_)
2582 {
2583 const struct slave_balance *a = a_;
2584 const struct slave_balance *b = b_;
2585 if (a->iface->enabled != b->iface->enabled) {
2586 return a->iface->enabled ? -1 : 1;
2587 } else if (a->tx_bytes != b->tx_bytes) {
2588 return a->tx_bytes > b->tx_bytes ? -1 : 1;
2589 } else {
2590 return 0;
2591 }
2592 }
2593
2594 static void
2595 swap_bals(struct slave_balance *a, struct slave_balance *b)
2596 {
2597 struct slave_balance tmp = *a;
2598 *a = *b;
2599 *b = tmp;
2600 }
2601
2602 /* Restores the 'n_bals' slave_balance structures in 'bals' to sorted order
2603 * given that 'p' (and only 'p') might be in the wrong location.
2604 *
2605 * This function invalidates 'p', since it might now be in a different memory
2606 * location. */
2607 static void
2608 resort_bals(struct slave_balance *p,
2609 struct slave_balance bals[], size_t n_bals)
2610 {
2611 if (n_bals > 1) {
2612 for (; p > bals && p->tx_bytes > p[-1].tx_bytes; p--) {
2613 swap_bals(p, p - 1);
2614 }
2615 for (; p < &bals[n_bals - 1] && p->tx_bytes < p[1].tx_bytes; p++) {
2616 swap_bals(p, p + 1);
2617 }
2618 }
2619 }
2620
2621 static void
2622 log_bals(const struct slave_balance *bals, size_t n_bals, struct port *port)
2623 {
2624 if (VLOG_IS_DBG_ENABLED()) {
2625 struct ds ds = DS_EMPTY_INITIALIZER;
2626 const struct slave_balance *b;
2627
2628 for (b = bals; b < bals + n_bals; b++) {
2629 size_t i;
2630
2631 if (b > bals) {
2632 ds_put_char(&ds, ',');
2633 }
2634 ds_put_format(&ds, " %s %"PRIu64"kB",
2635 b->iface->name, b->tx_bytes / 1024);
2636
2637 if (!b->iface->enabled) {
2638 ds_put_cstr(&ds, " (disabled)");
2639 }
2640 if (b->n_hashes > 0) {
2641 ds_put_cstr(&ds, " (");
2642 for (i = 0; i < b->n_hashes; i++) {
2643 const struct bond_entry *e = b->hashes[i];
2644 if (i > 0) {
2645 ds_put_cstr(&ds, " + ");
2646 }
2647 ds_put_format(&ds, "h%td: %"PRIu64"kB",
2648 e - port->bond_hash, e->tx_bytes / 1024);
2649 }
2650 ds_put_cstr(&ds, ")");
2651 }
2652 }
2653 VLOG_DBG("bond %s:%s", port->name, ds_cstr(&ds));
2654 ds_destroy(&ds);
2655 }
2656 }
2657
2658 /* Shifts from->hashes[hash_idx], and its load, from 'from' to 'to'. */
2659 static void
2660 bond_shift_load(struct slave_balance *from, struct slave_balance *to,
2661 int hash_idx)
2662 {
2663 struct bond_entry *hash = from->hashes[hash_idx];
2664 struct port *port = from->iface->port;
2665 uint64_t delta = hash->tx_bytes;
2666
2667 VLOG_INFO("bond %s: shift %"PRIu64"kB of load (with hash %td) "
2668 "from %s to %s (now carrying %"PRIu64"kB and "
2669 "%"PRIu64"kB load, respectively)",
2670 port->name, delta / 1024, hash - port->bond_hash,
2671 from->iface->name, to->iface->name,
2672 (from->tx_bytes - delta) / 1024,
2673 (to->tx_bytes + delta) / 1024);
2674
2675 /* Delete element from from->hashes.
2676 *
2677 * We don't bother to add the element to to->hashes because not only would
2678          * it require more work, but its only purpose would be to allow the hash
2679          * to be migrated to yet another slave later in this rebalancing run, and
2680          * there is no point in doing that. */
2681 if (hash_idx == 0) {
2682 from->hashes++;
2683 } else {
2684 memmove(from->hashes + hash_idx, from->hashes + hash_idx + 1,
2685 (from->n_hashes - (hash_idx + 1)) * sizeof *from->hashes);
2686 }
2687 from->n_hashes--;
2688
2689 /* Shift load away from 'from' to 'to'. */
2690 from->tx_bytes -= delta;
2691 to->tx_bytes += delta;
2692
2693 /* Arrange for flows to be revalidated. */
2694 ofproto_revalidate(port->bridge->ofproto, hash->iface_tag);
2695 hash->iface_idx = to->iface->port_ifidx;
2696 hash->iface_tag = tag_create_random();
2697 }
2698
2699 static void
2700 bond_rebalance_port(struct port *port)
2701 {
2702 struct slave_balance bals[DP_MAX_PORTS];
2703 size_t n_bals;
2704 struct bond_entry *hashes[BOND_MASK + 1];
2705 struct slave_balance *b, *from, *to;
2706 struct bond_entry *e;
2707 size_t i;
2708
2709 /* Sets up 'bals' to describe each of the port's interfaces, sorted in
2710 * descending order of tx_bytes, so that bals[0] represents the most
2711 * heavily loaded slave and bals[n_bals - 1] represents the least heavily
2712 * loaded slave.
2713 *
2714 * The code is a bit tricky: to avoid dynamically allocating a 'hashes'
2715 * array for each slave_balance structure, we sort our local array of
2716 * hashes in order by slave, so that all of the hashes for a given slave
2717      * become contiguous in memory, and then we point each slave_balance
2718      * structure's 'hashes' member to the start of its contiguous group. */
2719 n_bals = port->n_ifaces;
2720 for (b = bals; b < &bals[n_bals]; b++) {
2721 b->iface = port->ifaces[b - bals];
2722 b->tx_bytes = 0;
2723 b->hashes = NULL;
2724 b->n_hashes = 0;
2725 }
2726 for (i = 0; i <= BOND_MASK; i++) {
2727 hashes[i] = &port->bond_hash[i];
2728 }
2729 qsort(hashes, BOND_MASK + 1, sizeof *hashes, compare_bond_entries);
2730 for (i = 0; i <= BOND_MASK; i++) {
2731 e = hashes[i];
2732 if (e->iface_idx >= 0 && e->iface_idx < port->n_ifaces) {
2733 b = &bals[e->iface_idx];
2734 b->tx_bytes += e->tx_bytes;
2735 if (!b->hashes) {
2736 b->hashes = &hashes[i];
2737 }
2738 b->n_hashes++;
2739 }
2740 }
2741 qsort(bals, n_bals, sizeof *bals, compare_slave_balance);
2742 log_bals(bals, n_bals, port);
2743
2744 /* Discard slaves that aren't enabled (which were sorted to the back of the
2745 * array earlier). */
2746 while (!bals[n_bals - 1].iface->enabled) {
2747 n_bals--;
2748 if (!n_bals) {
2749 return;
2750 }
2751 }
2752
2753 /* Shift load from the most-loaded slaves to the least-loaded slaves. */
2754 to = &bals[n_bals - 1];
2755 for (from = bals; from < to; ) {
2756 uint64_t overload = from->tx_bytes - to->tx_bytes;
2757 if (overload < to->tx_bytes >> 5 || overload < 100000) {
2758 /* The extra load on 'from' (and all less-loaded slaves), compared
2759 * to that of 'to' (the least-loaded slave), is less than ~3%, or
2760 * it is less than ~1Mbps. No point in rebalancing. */
2761 break;
2762 } else if (from->n_hashes == 1) {
2763 /* 'from' only carries a single MAC hash, so we can't shift any
2764 * load away from it, even though we want to. */
2765 from++;
2766 } else {
2767 /* 'from' is carrying significantly more load than 'to', and that
2768 * load is split across at least two different hashes. Pick a hash
2769 * to migrate to 'to' (the least-loaded slave), given that doing so
2770 * must decrease the ratio of the load on the two slaves by at
2771 * least 0.1.
2772 *
2773 * The sort order we use means that we prefer to shift away the
2774 * smallest hashes instead of the biggest ones. There is little
2775 * reason behind this decision; we could use the opposite sort
2776 * order to shift away big hashes ahead of small ones. */
2777 size_t i;
2778 bool order_swapped;
2779
2780 for (i = 0; i < from->n_hashes; i++) {
2781 double old_ratio, new_ratio;
2782 uint64_t delta = from->hashes[i]->tx_bytes;
2783
2784 if (delta == 0 || from->tx_bytes - delta == 0) {
2785 /* Pointless move. */
2786 continue;
2787 }
2788
2789 order_swapped = from->tx_bytes - delta < to->tx_bytes + delta;
2790
2791 if (to->tx_bytes == 0) {
2792 /* Nothing on the new slave, move it. */
2793 break;
2794 }
2795
2796 old_ratio = (double)from->tx_bytes / to->tx_bytes;
2797 new_ratio = (double)(from->tx_bytes - delta) /
2798 (to->tx_bytes + delta);
2799
2800 if (new_ratio == 0) {
2801 /* Should already be covered but check to prevent division
2802 * by zero. */
2803 continue;
2804 }
2805
2806 if (new_ratio < 1) {
2807 new_ratio = 1 / new_ratio;
2808 }
2809
2810 if (old_ratio - new_ratio > 0.1) {
2811 /* Would decrease the ratio, move it. */
2812 break;
2813 }
2814 }
2815 if (i < from->n_hashes) {
2816 bond_shift_load(from, to, i);
2817 port->bond_compat_is_stale = true;
2818
2819 /* If the result of the migration changed the relative order of
2820              * 'from' and 'to', swap them back to maintain invariants. */
2821 if (order_swapped) {
2822 swap_bals(from, to);
2823 }
2824
2825 /* Re-sort 'bals'. Note that this may make 'from' and 'to'
2826 * point to different slave_balance structures. It is only
2827 * valid to do these two operations in a row at all because we
2828 * know that 'from' will not move past 'to' and vice versa. */
2829 resort_bals(from, bals, n_bals);
2830 resort_bals(to, bals, n_bals);
2831 } else {
2832 from++;
2833 }
2834 }
2835 }
2836
2837 /* Implement exponentially weighted moving average. A weight of 1/2 causes
2838 * historical data to decay to <1% in 7 rebalancing runs. */
2839 for (e = &port->bond_hash[0]; e <= &port->bond_hash[BOND_MASK]; e++) {
2840 e->tx_bytes /= 2;
2841 }
2842 }
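
/* Numeric illustration of the rebalancing criteria above (added commentary,
 * not in the original source):
 *
 *   - Migration test: if 'from' carries 1000 kB and 'to' carries 500 kB, a
 *     hash worth 200 kB gives old_ratio = 1000/500 = 2.0 and
 *     new_ratio = 800/700 ~= 1.14; the drop of ~0.86 exceeds 0.1, so the hash
 *     is shifted.
 *
 *   - EWMA decay: halving every run means a hash's history shrinks by
 *     (1/2)^7 = 1/128 ~= 0.8% after 7 rebalancing runs, which is the "<1%"
 *     figure mentioned above. */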
2843
2844 static void
2845 bond_send_learning_packets(struct port *port)
2846 {
2847 struct bridge *br = port->bridge;
2848 struct mac_entry *e;
2849 struct ofpbuf packet;
2850 int error, n_packets, n_errors;
2851
2852 if (!port->n_ifaces || port->active_iface < 0) {
2853 return;
2854 }
2855
2856 ofpbuf_init(&packet, 128);
2857 error = n_packets = n_errors = 0;
2858 LIST_FOR_EACH (e, struct mac_entry, lru_node, &br->ml->lrus) {
2859 union ofp_action actions[2], *a;
2860 uint16_t dp_ifidx;
2861 tag_type tags = 0;
2862 flow_t flow;
2863 int retval;
2864
2865 if (e->port == port->port_idx
2866 || !choose_output_iface(port, e->mac, &dp_ifidx, &tags)) {
2867 continue;
2868 }
2869
2870 /* Compose actions. */
2871 memset(actions, 0, sizeof actions);
2872 a = actions;
2873 if (e->vlan) {
2874 a->vlan_vid.type = htons(OFPAT_SET_VLAN_VID);
2875 a->vlan_vid.len = htons(sizeof *a);
2876 a->vlan_vid.vlan_vid = htons(e->vlan);
2877 a++;
2878 }
2879 a->output.type = htons(OFPAT_OUTPUT);
2880 a->output.len = htons(sizeof *a);
2881 a->output.port = htons(odp_port_to_ofp_port(dp_ifidx));
2882 a++;
2883
2884 /* Send packet. */
2885 n_packets++;
2886 compose_benign_packet(&packet, "Open vSwitch Bond Failover", 0xf177,
2887 e->mac);
2888 flow_extract(&packet, 0, ODPP_NONE, &flow);
2889 retval = ofproto_send_packet(br->ofproto, &flow, actions, a - actions,
2890 &packet);
2891 if (retval) {
2892 error = retval;
2893 n_errors++;
2894 }
2895 }
2896 ofpbuf_uninit(&packet);
2897
2898 if (n_errors) {
2899 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2900 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
2901 "packets, last error was: %s",
2902 port->name, n_errors, n_packets, strerror(error));
2903 } else {
2904 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
2905 port->name, n_packets);
2906 }
2907 }
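
/* Added note (not in the original source): after a failover or a change of
 * active slave, the learning switch on the other end of the bond may still
 * associate the MAC addresses behind this bridge with the old slave's link.
 * The loop above therefore sends roughly one benign packet per relevant
 * MAC-table entry, sourced from that MAC and transmitted on the slave now
 * chosen for it, so that the upstream switch updates its own MAC table. */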
2908 \f
2909 /* Bonding unixctl user interface functions. */
2910
2911 static void
2912 bond_unixctl_list(struct unixctl_conn *conn,
2913 const char *args OVS_UNUSED, void *aux OVS_UNUSED)
2914 {
2915 struct ds ds = DS_EMPTY_INITIALIZER;
2916 const struct bridge *br;
2917
2918 ds_put_cstr(&ds, "bridge\tbond\tslaves\n");
2919
2920 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
2921 size_t i;
2922
2923 for (i = 0; i < br->n_ports; i++) {
2924 const struct port *port = br->ports[i];
2925 if (port->n_ifaces > 1) {
2926 size_t j;
2927
2928 ds_put_format(&ds, "%s\t%s\t", br->name, port->name);
2929 for (j = 0; j < port->n_ifaces; j++) {
2930 const struct iface *iface = port->ifaces[j];
2931 if (j) {
2932 ds_put_cstr(&ds, ", ");
2933 }
2934 ds_put_cstr(&ds, iface->name);
2935 }
2936 ds_put_char(&ds, '\n');
2937 }
2938 }
2939 }
2940 unixctl_command_reply(conn, 200, ds_cstr(&ds));
2941 ds_destroy(&ds);
2942 }
2943
2944 static struct port *
2945 bond_find(const char *name)
2946 {
2947 const struct bridge *br;
2948
2949 LIST_FOR_EACH (br, struct bridge, node, &all_bridges) {
2950 size_t i;
2951
2952 for (i = 0; i < br->n_ports; i++) {
2953 struct port *port = br->ports[i];
2954 if (!strcmp(port->name, name) && port->n_ifaces > 1) {
2955 return port;
2956 }
2957 }
2958 }
2959 return NULL;
2960 }
2961
2962 static void
2963 bond_unixctl_show(struct unixctl_conn *conn,
2964 const char *args, void *aux OVS_UNUSED)
2965 {
2966 struct ds ds = DS_EMPTY_INITIALIZER;
2967 const struct port *port;
2968 size_t j;
2969
2970 port = bond_find(args);
2971 if (!port) {
2972 unixctl_command_reply(conn, 501, "no such bond");
2973 return;
2974 }
2975
2976 ds_put_format(&ds, "updelay: %d ms\n", port->updelay);
2977 ds_put_format(&ds, "downdelay: %d ms\n", port->downdelay);
2978 ds_put_format(&ds, "next rebalance: %lld ms\n",
2979 port->bond_next_rebalance - time_msec());
2980 for (j = 0; j < port->n_ifaces; j++) {
2981 const struct iface *iface = port->ifaces[j];
2982 struct bond_entry *be;
2983
2984 /* Basic info. */
2985 ds_put_format(&ds, "slave %s: %s\n",
2986 iface->name, iface->enabled ? "enabled" : "disabled");
2987 if (j == port->active_iface) {
2988 ds_put_cstr(&ds, "\tactive slave\n");
2989 }
2990 if (iface->delay_expires != LLONG_MAX) {
2991 ds_put_format(&ds, "\t%s expires in %lld ms\n",
2992 iface->enabled ? "downdelay" : "updelay",
2993 iface->delay_expires - time_msec());
2994 }
2995
2996 /* Hashes. */
2997 for (be = port->bond_hash; be <= &port->bond_hash[BOND_MASK]; be++) {
2998 int hash = be - port->bond_hash;
2999 struct mac_entry *me;
3000
3001 if (be->iface_idx != j) {
3002 continue;
3003 }
3004
3005 ds_put_format(&ds, "\thash %d: %"PRIu64" kB load\n",
3006 hash, be->tx_bytes / 1024);
3007
3008 /* MACs. */
3009 LIST_FOR_EACH (me, struct mac_entry, lru_node,
3010 &port->bridge->ml->lrus) {
3011 uint16_t dp_ifidx;
3012 tag_type tags = 0;
3013 if (bond_hash(me->mac) == hash
3014 && me->port != port->port_idx
3015 && choose_output_iface(port, me->mac, &dp_ifidx, &tags)
3016 && dp_ifidx == iface->dp_ifidx)
3017 {
3018 ds_put_format(&ds, "\t\t"ETH_ADDR_FMT"\n",
3019 ETH_ADDR_ARGS(me->mac));
3020 }
3021 }
3022 }
3023 }
3024 unixctl_command_reply(conn, 200, ds_cstr(&ds));
3025 ds_destroy(&ds);
3026 }
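
/* Example of the reply format built above (illustrative only; the interface
 * names, hashes, and byte counts below are hypothetical):
 *
 *     updelay: 0 ms
 *     downdelay: 0 ms
 *     next rebalance: 7421 ms
 *     slave eth2: enabled
 *             active slave
 *             hash 54: 12 kB load
 *                     00:16:3e:5b:12:34
 *     slave eth3: enabled
 *             hash 199: 3 kB load
 */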
3027
3028 static void
3029 bond_unixctl_migrate(struct unixctl_conn *conn, const char *args_,
3030 void *aux OVS_UNUSED)
3031 {
3032 char *args = (char *) args_;
3033 char *save_ptr = NULL;
3034 char *bond_s, *hash_s, *slave_s;
3035 uint8_t mac[ETH_ADDR_LEN];
3036 struct port *port;
3037 struct iface *iface;
3038 struct bond_entry *entry;
3039 int hash;
3040
3041 bond_s = strtok_r(args, " ", &save_ptr);
3042 hash_s = strtok_r(NULL, " ", &save_ptr);
3043 slave_s = strtok_r(NULL, " ", &save_ptr);
3044 if (!slave_s) {
3045 unixctl_command_reply(conn, 501,
3046 "usage: bond/migrate BOND HASH SLAVE");
3047 return;
3048 }
3049
3050 port = bond_find(bond_s);
3051 if (!port) {
3052 unixctl_command_reply(conn, 501, "no such bond");
3053 return;
3054 }
3055
3056 if (sscanf(hash_s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))
3057 == ETH_ADDR_SCAN_COUNT) {
3058 hash = bond_hash(mac);
3059 } else if (strspn(hash_s, "0123456789") == strlen(hash_s)) {
3060 hash = atoi(hash_s) & BOND_MASK;
3061 } else {
3062 unixctl_command_reply(conn, 501, "bad hash");
3063 return;
3064 }
3065
3066 iface = port_lookup_iface(port, slave_s);
3067 if (!iface) {
3068 unixctl_command_reply(conn, 501, "no such slave");
3069 return;
3070 }
3071
3072 if (!iface->enabled) {
3073 unixctl_command_reply(conn, 501, "cannot migrate to disabled slave");
3074 return;
3075 }
3076
3077 entry = &port->bond_hash[hash];
3078 ofproto_revalidate(port->bridge->ofproto, entry->iface_tag);
3079 entry->iface_idx = iface->port_ifidx;
3080 entry->iface_tag = tag_create_random();
3081 port->bond_compat_is_stale = true;
3082 unixctl_command_reply(conn, 200, "migrated");
3083 }
3084
3085 static void
3086 bond_unixctl_set_active_slave(struct unixctl_conn *conn, const char *args_,
3087 void *aux OVS_UNUSED)
3088 {
3089 char *args = (char *) args_;
3090 char *save_ptr = NULL;
3091 char *bond_s, *slave_s;
3092 struct port *port;
3093 struct iface *iface;
3094
3095 bond_s = strtok_r(args, " ", &save_ptr);
3096 slave_s = strtok_r(NULL, " ", &save_ptr);
3097 if (!slave_s) {
3098 unixctl_command_reply(conn, 501,
3099 "usage: bond/set-active-slave BOND SLAVE");
3100 return;
3101 }
3102
3103 port = bond_find(bond_s);
3104 if (!port) {
3105 unixctl_command_reply(conn, 501, "no such bond");
3106 return;
3107 }
3108
3109 iface = port_lookup_iface(port, slave_s);
3110 if (!iface) {
3111 unixctl_command_reply(conn, 501, "no such slave");
3112 return;
3113 }
3114
3115 if (!iface->enabled) {
3116 unixctl_command_reply(conn, 501, "cannot make disabled slave active");
3117 return;
3118 }
3119
3120 if (port->active_iface != iface->port_ifidx) {
3121 ofproto_revalidate(port->bridge->ofproto, port->active_iface_tag);
3122 port->active_iface = iface->port_ifidx;
3123 port->active_iface_tag = tag_create_random();
3124 VLOG_INFO("port %s: active interface is now %s",
3125 port->name, iface->name);
3126 bond_send_learning_packets(port);
3127 unixctl_command_reply(conn, 200, "done");
3128 } else {
3129 unixctl_command_reply(conn, 200, "no change");
3130 }
3131 }
3132
3133 static void
3134 enable_slave(struct unixctl_conn *conn, const char *args_, bool enable)
3135 {
3136 char *args = (char *) args_;
3137 char *save_ptr = NULL;
3138 char *bond_s, *slave_s;
3139 struct port *port;
3140 struct iface *iface;
3141
3142 bond_s = strtok_r(args, " ", &save_ptr);
3143 slave_s = strtok_r(NULL, " ", &save_ptr);
3144 if (!slave_s) {
3145 unixctl_command_reply(conn, 501,
3146 "usage: bond/enable/disable-slave BOND SLAVE");
3147 return;
3148 }
3149
3150 port = bond_find(bond_s);
3151 if (!port) {
3152 unixctl_command_reply(conn, 501, "no such bond");
3153 return;
3154 }
3155
3156 iface = port_lookup_iface(port, slave_s);
3157 if (!iface) {
3158 unixctl_command_reply(conn, 501, "no such slave");
3159 return;
3160 }
3161
3162 bond_enable_slave(iface, enable);
3163     unixctl_command_reply(conn, 200, enable ? "enabled" : "disabled");
3164 }
3165
3166 static void
3167 bond_unixctl_enable_slave(struct unixctl_conn *conn, const char *args,
3168 void *aux OVS_UNUSED)
3169 {
3170 enable_slave(conn, args, true);
3171 }
3172
3173 static void
3174 bond_unixctl_disable_slave(struct unixctl_conn *conn, const char *args,
3175 void *aux OVS_UNUSED)
3176 {
3177 enable_slave(conn, args, false);
3178 }
3179
3180 static void
3181 bond_unixctl_hash(struct unixctl_conn *conn, const char *args,
3182 void *aux OVS_UNUSED)
3183 {
3184 uint8_t mac[ETH_ADDR_LEN];
3185 uint8_t hash;
3186 char *hash_cstr;
3187
3188 if (sscanf(args, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))
3189 == ETH_ADDR_SCAN_COUNT) {
3190 hash = bond_hash(mac);
3191
3192 hash_cstr = xasprintf("%u", hash);
3193 unixctl_command_reply(conn, 200, hash_cstr);
3194 free(hash_cstr);
3195 } else {
3196 unixctl_command_reply(conn, 501, "invalid mac");
3197 }
3198 }
3199
3200 static void
3201 bond_init(void)
3202 {
3203 unixctl_command_register("bond/list", bond_unixctl_list, NULL);
3204 unixctl_command_register("bond/show", bond_unixctl_show, NULL);
3205 unixctl_command_register("bond/migrate", bond_unixctl_migrate, NULL);
3206 unixctl_command_register("bond/set-active-slave",
3207 bond_unixctl_set_active_slave, NULL);
3208 unixctl_command_register("bond/enable-slave", bond_unixctl_enable_slave,
3209 NULL);
3210 unixctl_command_register("bond/disable-slave", bond_unixctl_disable_slave,
3211 NULL);
3212 unixctl_command_register("bond/hash", bond_unixctl_hash, NULL);
3213 }
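
/* Added usage note (not in the original source): the commands registered
 * above are reached through the vswitchd unixctl socket, typically with the
 * ovs-appctl utility, e.g.:
 *
 *     ovs-appctl bond/list
 *     ovs-appctl bond/show bond0
 *     ovs-appctl bond/migrate bond0 54 eth3
 *
 * "bond0" and the slave name here are hypothetical; the HASH argument to
 * bond/migrate may be either a hash number or a MAC address, as parsed in
 * bond_unixctl_migrate() above. */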
3214 \f
3215 /* Port functions. */
3216
3217 static struct port *
3218 port_create(struct bridge *br, const char *name)
3219 {
3220 struct port *port;
3221
3222 port = xzalloc(sizeof *port);
3223 port->bridge = br;
3224 port->port_idx = br->n_ports;
3225 port->vlan = -1;
3226 port->trunks = NULL;
3227 port->name = xstrdup(name);
3228 port->active_iface = -1;
3229
3230 if (br->n_ports >= br->allocated_ports) {
3231 br->ports = x2nrealloc(br->ports, &br->allocated_ports,
3232 sizeof *br->ports);
3233 }
3234 br->ports[br->n_ports++] = port;
3235
3236 VLOG_INFO("created port %s on bridge %s", port->name, br->name);
3237 bridge_flush(br);
3238
3239 return port;
3240 }
3241
3242 static const char *
3243 get_port_other_config(const struct ovsrec_port *port, const char *key,
3244 const char *default_value)
3245 {
3246 const char *value = get_ovsrec_key_value(key,
3247 port->key_other_config,
3248 port->value_other_config,
3249 port->n_other_config);
3250 return value ? value : default_value;
3251 }
3252
3253 static void
3254 port_reconfigure(struct port *port, const struct ovsrec_port *cfg)
3255 {
3256 struct shash old_ifaces, new_ifaces;
3257 long long int next_rebalance;
3258 struct shash_node *node;
3259 unsigned long *trunks;
3260 int vlan;
3261 size_t i;
3262
3263 port->cfg = cfg;
3264
3265 /* Collect old and new interfaces. */
3266 shash_init(&old_ifaces);
3267 shash_init(&new_ifaces);
3268 for (i = 0; i < port->n_ifaces; i++) {
3269 shash_add(&old_ifaces, port->ifaces[i]->name, port->ifaces[i]);
3270 }
3271 for (i = 0; i < cfg->n_interfaces; i++) {
3272 const char *name = cfg->interfaces[i]->name;
3273 if (!shash_add_once(&new_ifaces, name, cfg->interfaces[i])) {
3274 VLOG_WARN("port %s: %s specified twice as port interface",
3275 port->name, name);
3276 }
3277 }
3278 port->updelay = cfg->bond_updelay;
3279 if (port->updelay < 0) {
3280 port->updelay = 0;
3281 }
3282     port->downdelay = cfg->bond_downdelay;
3283 if (port->downdelay < 0) {
3284 port->downdelay = 0;
3285 }
3286 port->bond_rebalance_interval = atoi(
3287 get_port_other_config(cfg, "bond-rebalance-interval", "10000"));
3288 if (port->bond_rebalance_interval < 1000) {
3289 port->bond_rebalance_interval = 1000;
3290 }
3291 next_rebalance = time_msec() + port->bond_rebalance_interval;
3292 if (port->bond_next_rebalance > next_rebalance) {
3293 port->bond_next_rebalance = next_rebalance;
3294 }
3295
3296 /* Get rid of deleted interfaces and add new interfaces. */
3297 SHASH_FOR_EACH (node, &old_ifaces) {
3298 if (!shash_find(&new_ifaces, node->name)) {
3299 iface_destroy(node->data);
3300 }
3301 }
3302 SHASH_FOR_EACH (node, &new_ifaces) {
3303 const struct ovsrec_interface *if_cfg = node->data;
3304 struct iface *iface;
3305
3306 iface = shash_find_data(&old_ifaces, if_cfg->name);
3307 if (!iface) {
3308 iface_create(port, if_cfg);
3309 } else {
3310 iface->cfg = if_cfg;
3311 }
3312 }
3313
3314 /* Get VLAN tag. */
3315 vlan = -1;
3316 if (cfg->tag) {
3317 if (port->n_ifaces < 2) {
3318 vlan = *cfg->tag;
3319 if (vlan >= 0 && vlan <= 4095) {
3320 VLOG_DBG("port %s: assigning VLAN tag %d", port->name, vlan);
3321 } else {
3322 vlan = -1;
3323 }
3324 } else {
3325 /* It's possible that bonded, VLAN-tagged ports make sense. Maybe
3326 * they even work as-is. But they have not been tested. */
3327 VLOG_WARN("port %s: VLAN tags not supported on bonded ports",
3328 port->name);
3329 }
3330 }
3331 if (port->vlan != vlan) {
3332 port->vlan = vlan;
3333 bridge_flush(port->bridge);
3334 }
3335
3336 /* Get trunked VLANs. */
3337 trunks = NULL;
3338 if (vlan < 0) {
3339 size_t n_errors;
3340 size_t i;
3341
3342 trunks = bitmap_allocate(4096);
3343 n_errors = 0;
3344 for (i = 0; i < cfg->n_trunks; i++) {
3345 int trunk = cfg->trunks[i];
3346 if (trunk >= 0) {
3347 bitmap_set1(trunks, trunk);
3348 } else {
3349 n_errors++;
3350 }
3351 }
3352 if (n_errors) {
3353 VLOG_ERR("port %s: invalid values for %zu trunk VLANs",
3354 port->name, cfg->n_trunks);
3355 }
3356 if (n_errors == cfg->n_trunks) {
3357 if (n_errors) {
3358 VLOG_ERR("port %s: no valid trunks, trunking all VLANs",
3359 port->name);
3360 }
3361 bitmap_set_multiple(trunks, 0, 4096, 1);
3362 }
3363 } else {
3364 if (cfg->n_trunks) {
3365 VLOG_ERR("port %s: ignoring trunks in favor of implicit vlan",
3366 port->name);
3367 }
3368 }
3369 if (trunks == NULL
3370 ? port->trunks != NULL
3371 : port->trunks == NULL || !bitmap_equal(trunks, port->trunks, 4096)) {
3372 bridge_flush(port->bridge);
3373 }
3374 bitmap_free(port->trunks);
3375 port->trunks = trunks;
3376
3377 shash_destroy(&old_ifaces);
3378 shash_destroy(&new_ifaces);
3379 }
3380
3381 static void
3382 port_destroy(struct port *port)
3383 {
3384 if (port) {
3385 struct bridge *br = port->bridge;
3386 struct port *del;
3387 int i;
3388
3389 proc_net_compat_update_vlan(port->name, NULL, 0);
3390 proc_net_compat_update_bond(port->name, NULL);
3391
3392 for (i = 0; i < MAX_MIRRORS; i++) {
3393 struct mirror *m = br->mirrors[i];
3394 if (m && m->out_port == port) {
3395 mirror_destroy(m);
3396 }
3397 }
3398
3399 while (port->n_ifaces > 0) {
3400 iface_destroy(port->ifaces[port->n_ifaces - 1]);
3401 }
3402
3403 del = br->ports[port->port_idx] = br->ports[--br->n_ports];
3404 del->port_idx = port->port_idx;
3405
3406 free(port->ifaces);
3407 bitmap_free(port->trunks);
3408 free(port->name);
3409 free(port);
3410 bridge_flush(br);
3411 }
3412 }
3413
3414 static struct port *
3415 port_from_dp_ifidx(const struct bridge *br, uint16_t dp_ifidx)
3416 {
3417 struct iface *iface = iface_from_dp_ifidx(br, dp_ifidx);
3418 return iface ? iface->port : NULL;
3419 }
3420
3421 static struct port *
3422 port_lookup(const struct bridge *br, const char *name)
3423 {
3424 size_t i;
3425
3426 for (i = 0; i < br->n_ports; i++) {
3427 struct port *port = br->ports[i];
3428 if (!strcmp(port->name, name)) {
3429 return port;
3430 }
3431 }
3432 return NULL;
3433 }
3434
3435 static struct iface *
3436 port_lookup_iface(const struct port *port, const char *name)
3437 {
3438 size_t j;
3439
3440 for (j = 0; j < port->n_ifaces; j++) {
3441 struct iface *iface = port->ifaces[j];
3442 if (!strcmp(iface->name, name)) {
3443 return iface;
3444 }
3445 }
3446 return NULL;
3447 }
3448
3449 static void
3450 port_update_bonding(struct port *port)
3451 {
3452 if (port->n_ifaces < 2) {
3453 /* Not a bonded port. */
3454 if (port->bond_hash) {
3455 free(port->bond_hash);
3456 port->bond_hash = NULL;
3457 port->bond_compat_is_stale = true;
3458 port->bond_fake_iface = false;
3459 }
3460 } else {
3461 if (!port->bond_hash) {
3462 size_t i;
3463
3464 port->bond_hash = xcalloc(BOND_MASK + 1, sizeof *port->bond_hash);
3465 for (i = 0; i <= BOND_MASK; i++) {
3466 struct bond_entry *e = &port->bond_hash[i];
3467 e->iface_idx = -1;
3468 e->tx_bytes = 0;
3469 }
3470 port->no_ifaces_tag = tag_create_random();
3471 bond_choose_active_iface(port);
3472 port->bond_next_rebalance
3473 = time_msec() + port->bond_rebalance_interval;
3474
3475 if (port->cfg->bond_fake_iface) {
3476 port->bond_next_fake_iface_update = time_msec();
3477 }
3478 }
3479 port->bond_compat_is_stale = true;
3480 port->bond_fake_iface = port->cfg->bond_fake_iface;
3481 }
3482 }
3483
3484 static void
3485 port_update_bond_compat(struct port *port)
3486 {
3487 struct compat_bond_hash compat_hashes[BOND_MASK + 1];
3488 struct compat_bond bond;
3489 size_t i;
3490
3491 if (port->n_ifaces < 2) {
3492 proc_net_compat_update_bond(port->name, NULL);
3493 return;
3494 }
3495
3496 bond.up = false;
3497 bond.updelay = port->updelay;
3498 bond.downdelay = port->downdelay;
3499
3500 bond.n_hashes = 0;
3501 bond.hashes = compat_hashes;
3502 if (port->bond_hash) {
3503 const struct bond_entry *e;
3504 for (e = port->bond_hash; e <= &port->bond_hash[BOND_MASK]; e++) {
3505 if (e->iface_idx >= 0 && e->iface_idx < port->n_ifaces) {
3506 struct compat_bond_hash *cbh = &bond.hashes[bond.n_hashes++];
3507 cbh->hash = e - port->bond_hash;
3508 cbh->netdev_name = port->ifaces[e->iface_idx]->name;
3509 }
3510 }
3511 }
3512
3513 bond.n_slaves = port->n_ifaces;
3514 bond.slaves = xmalloc(port->n_ifaces * sizeof *bond.slaves);
3515 for (i = 0; i < port->n_ifaces; i++) {
3516 struct iface *iface = port->ifaces[i];
3517 struct compat_bond_slave *slave = &bond.slaves[i];
3518 slave->name = iface->name;
3519
3520          * We need to make the same determination as the Linux bonding
3521          * code does about whether a slave should be considered "up".
3522 * The Linux function bond_miimon_inspect() supports four
3523 * BOND_LINK_* states:
3524 *
3525 * - BOND_LINK_UP: carrier detected, updelay has passed.
3526 * - BOND_LINK_FAIL: carrier lost, downdelay in progress.
3527 * - BOND_LINK_DOWN: carrier lost, downdelay has passed.
3528 * - BOND_LINK_BACK: carrier detected, updelay in progress.
3529 *
3530 * The function bond_info_show_slave() only considers BOND_LINK_UP
3531 * to be "up" and anything else to be "down".
3532 */
3533 slave->up = iface->enabled && iface->delay_expires == LLONG_MAX;
3534 if (slave->up) {
3535 bond.up = true;
3536 }
3537 netdev_get_etheraddr(iface->netdev, slave->mac);
3538 }
3539
3540 if (port->bond_fake_iface) {
3541 struct netdev *bond_netdev;
3542
3543 if (!netdev_open_default(port->name, &bond_netdev)) {
3544 if (bond.up) {
3545 netdev_turn_flags_on(bond_netdev, NETDEV_UP, true);
3546 } else {
3547 netdev_turn_flags_off(bond_netdev, NETDEV_UP, true);
3548 }
3549 netdev_close(bond_netdev);
3550 }
3551 }
3552
3553 proc_net_compat_update_bond(port->name, &bond);
3554 free(bond.slaves);
3555 }
3556
3557 static void
3558 port_update_vlan_compat(struct port *port)
3559 {
3560 struct bridge *br = port->bridge;
3561 char *vlandev_name = NULL;
3562
3563 if (port->vlan > 0) {
3564 /* Figure out the name that the VLAN device should actually have, if it
3565 * existed. This takes some work because the VLAN device would not
3566 * have port->name in its name; rather, it would have the trunk port's
3567 * name, and 'port' would be attached to a bridge that also had the
3568          * VLAN device as one of its ports. So we need to find a trunk port that
3569 * includes port->vlan.
3570 *
3571 * There might be more than one candidate. This doesn't happen on
3572 * XenServer, so if it happens we just pick the first choice in
3573 * alphabetical order instead of creating multiple VLAN devices. */
3574 size_t i;
3575 for (i = 0; i < br->n_ports; i++) {
3576 struct port *p = br->ports[i];
3577 if (port_trunks_vlan(p, port->vlan)
3578 && p->n_ifaces
3579 && (!vlandev_name || strcmp(p->name, vlandev_name) <= 0))
3580 {
3581 uint8_t ea[ETH_ADDR_LEN];
3582 netdev_get_etheraddr(p->ifaces[0]->netdev, ea);
3583 if (!eth_addr_is_multicast(ea) &&
3584 !eth_addr_is_reserved(ea) &&
3585 !eth_addr_is_zero(ea)) {
3586 vlandev_name = p->name;
3587 }
3588 }
3589 }
3590 }
3591 proc_net_compat_update_vlan(port->name, vlandev_name, port->vlan);
3592 }
3593 \f
3594 /* Interface functions. */
3595
3596 static struct iface *
3597 iface_create(struct port *port, const struct ovsrec_interface *if_cfg)
3598 {
3599 struct iface *iface;
3600 char *name = if_cfg->name;
3601 int error;
3602
3603 iface = xzalloc(sizeof *iface);
3604 iface->port = port;
3605 iface->port_ifidx = port->n_ifaces;
3606 iface->name = xstrdup(name);
3607 iface->dp_ifidx = -1;
3608 iface->tag = tag_create_random();
3609 iface->delay_expires = LLONG_MAX;
3610 iface->netdev = NULL;
3611 iface->cfg = if_cfg;
3612
3613 /* Attempt to create the network interface in case it doesn't exist yet. */
3614 if (!iface_is_internal(port->bridge, iface->name)) {
3615 error = set_up_iface(if_cfg, iface, true);
3616 if (error) {
3617 VLOG_WARN("could not create iface %s: %s", iface->name,
3618 strerror(error));
3619
3620 free(iface->name);
3621 free(iface);
3622 return NULL;
3623 }
3624 }
3625
3626 if (port->n_ifaces >= port->allocated_ifaces) {
3627 port->ifaces = x2nrealloc(port->ifaces, &port->allocated_ifaces,
3628 sizeof *port->ifaces);
3629 }
3630 port->ifaces[port->n_ifaces++] = iface;
3631 if (port->n_ifaces > 1) {
3632 port->bridge->has_bonded_ports = true;
3633 }
3634
3635 VLOG_DBG("attached network device %s to port %s", iface->name, port->name);
3636
3637 bridge_flush(port->bridge);
3638
3639 return iface;
3640 }
3641
3642 static void
3643 iface_destroy(struct iface *iface)
3644 {
3645 if (iface) {
3646 struct port *port = iface->port;
3647 struct bridge *br = port->bridge;
3648 bool del_active = port->active_iface == iface->port_ifidx;
3649 struct iface *del;
3650
3651 if (iface->dp_ifidx >= 0) {
3652 port_array_set(&br->ifaces, iface->dp_ifidx, NULL);
3653 }
3654
3655 del = port->ifaces[iface->port_ifidx] = port->ifaces[--port->n_ifaces];
3656 del->port_ifidx = iface->port_ifidx;
3657
3658 netdev_close(iface->netdev);
3659
3660 if (del_active) {
3661 ofproto_revalidate(port->bridge->ofproto, port->active_iface_tag);
3662 bond_choose_active_iface(port);
3663 bond_send_learning_packets(port);
3664 }
3665
3666 free(iface->name);
3667 free(iface);
3668
3669 bridge_flush(port->bridge);
3670 }
3671 }
3672
3673 static struct iface *
3674 iface_lookup(const struct bridge *br, const char *name)
3675 {
3676 size_t i, j;
3677
3678 for (i = 0; i < br->n_ports; i++) {
3679 struct port *port = br->ports[i];
3680 for (j = 0; j < port->n_ifaces; j++) {
3681 struct iface *iface = port->ifaces[j];
3682 if (!strcmp(iface->name, name)) {
3683 return iface;
3684 }
3685 }
3686 }
3687 return NULL;
3688 }
3689
3690 static struct iface *
3691 iface_from_dp_ifidx(const struct bridge *br, uint16_t dp_ifidx)
3692 {
3693 return port_array_get(&br->ifaces, dp_ifidx);
3694 }
3695
3696 /* Returns true if 'iface' is the name of an "internal" interface on bridge
3697 * 'br', that is, an interface that is entirely simulated within the datapath.
3698 * The local port (ODPP_LOCAL) is always an internal interface. Other local
3699  * interfaces are created by giving the interface the type "internal".
3700 *
3701 * In addition, we have a kluge-y feature that creates an internal port with
3702 * the name of a bonded port if "bonding.<bondname>.fake-iface = true" is set.
3703 * This feature needs to go away in the long term. Until then, this is one
3704 * reason why this function takes a name instead of a struct iface: the fake
3705 * interfaces created this way do not have a struct iface. */
3706 static bool
3707 iface_is_internal(const struct bridge *br, const char *if_name)
3708 {
3709 /* XXX wastes time */
3710 struct iface *iface;
3711 struct port *port;
3712
3713 if (!strcmp(if_name, br->name)) {
3714 return true;
3715 }
3716
3717 iface = iface_lookup(br, if_name);
3718 if (iface && !strcmp(iface->cfg->type, "internal")) {
3719 return true;
3720 }
3721
3722 port = port_lookup(br, if_name);
3723 if (port && port->n_ifaces > 1 && port->cfg->bond_fake_iface) {
3724 return true;
3725 }
3726 return false;
3727 }
3728
3729 /* Sets the Ethernet address of 'iface', if one is specified in its
3730 * configuration. */
3731 static void
3732 iface_set_mac(struct iface *iface)
3733 {
3734 uint8_t ea[ETH_ADDR_LEN];
3735
3736 if (iface->cfg->mac && eth_addr_from_string(iface->cfg->mac, ea)) {
3737 if (eth_addr_is_multicast(ea)) {
3738 VLOG_ERR("interface %s: cannot set MAC to multicast address",
3739 iface->name);
3740 } else if (iface->dp_ifidx == ODPP_LOCAL) {
3741 VLOG_ERR("ignoring iface.%s.mac; use bridge.%s.mac instead",
3742 iface->name, iface->name);
3743 } else {
3744 int error = netdev_set_etheraddr(iface->netdev, ea);
3745 if (error) {
3746 VLOG_ERR("interface %s: setting MAC failed (%s)",
3747 iface->name, strerror(error));
3748 }
3749 }
3750 }
3751 }
3752 \f
3753 /* Port mirroring. */
3754
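/* Synchronizes 'br''s mirrors with its database configuration: destroys
 * mirrors that are no longer configured, creates newly configured ones, and
 * reconfigures the rest.  Also updates which ports are reserved as mirror
 * output ports and which VLANs are flooded for RSPAN. */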
3755 static void
3756 mirror_reconfigure(struct bridge *br)
3757 {
3758 struct shash old_mirrors, new_mirrors;
3759 struct shash_node *node;
3760 unsigned long *rspan_vlans;
3761 int i;
3762
3763 /* Collect old mirrors. */
3764 shash_init(&old_mirrors);
3765 for (i = 0; i < MAX_MIRRORS; i++) {
3766 if (br->mirrors[i]) {
3767 shash_add(&old_mirrors, br->mirrors[i]->name, br->mirrors[i]);
3768 }
3769 }
3770
3771 /* Collect new mirrors. */
3772 shash_init(&new_mirrors);
3773 for (i = 0; i < br->cfg->n_mirrors; i++) {
3774 struct ovsrec_mirror *cfg = br->cfg->mirrors[i];
3775 if (!shash_add_once(&new_mirrors, cfg->name, cfg)) {
3776 VLOG_WARN("bridge %s: %s specified twice as mirror",
3777 br->name, cfg->name);
3778 }
3779 }
3780
3781 /* Get rid of deleted mirrors and add new mirrors. */
3782 SHASH_FOR_EACH (node, &old_mirrors) {
3783 if (!shash_find(&new_mirrors, node->name)) {
3784 mirror_destroy(node->data);
3785 }
3786 }
3787 SHASH_FOR_EACH (node, &new_mirrors) {
3788 struct mirror *mirror = shash_find_data(&old_mirrors, node->name);
3789 if (!mirror) {
3790 mirror = mirror_create(br, node->name);
3791 if (!mirror) {
3792 break;
3793 }
3794 }
3795 mirror_reconfigure_one(mirror, node->data);
3796 }
3797 shash_destroy(&old_mirrors);
3798 shash_destroy(&new_mirrors);
3799
3800 /* Update port reserved status. */
3801 for (i = 0; i < br->n_ports; i++) {
3802 br->ports[i]->is_mirror_output_port = false;
3803 }
3804 for (i = 0; i < MAX_MIRRORS; i++) {
3805 struct mirror *m = br->mirrors[i];
3806 if (m && m->out_port) {
3807 m->out_port->is_mirror_output_port = true;
3808 }
3809 }
3810
3811 /* Update flooded vlans (for RSPAN). */
3812 rspan_vlans = NULL;
3813 if (br->cfg->n_flood_vlans) {
3814 rspan_vlans = bitmap_allocate(4096);
3815
3816 for (i = 0; i < br->cfg->n_flood_vlans; i++) {
3817 int64_t vlan = br->cfg->flood_vlans[i];
3818 if (vlan >= 0 && vlan < 4096) {
3819 bitmap_set1(rspan_vlans, vlan);
3820 VLOG_INFO("bridge %s: disabling learning on vlan %"PRId64,
3821 br->name, vlan);
3822 } else {
3823 VLOG_ERR("bridge %s: invalid value %"PRId64 "for flood VLAN",
3824 br->name, vlan);
3825 }
3826 }
3827 }
3828 if (mac_learning_set_flood_vlans(br->ml, rspan_vlans)) {
3829 bridge_flush(br);
3830 }
3831 }
3832
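/* Creates and returns a new, initially empty mirror named 'name' on bridge
 * 'br'.  Returns a null pointer, after logging a warning, if 'br' already
 * has MAX_MIRRORS mirrors. */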
3833 static struct mirror *
3834 mirror_create(struct bridge *br, const char *name)
3835 {
3836 struct mirror *m;
3837 size_t i;
3838
3839 for (i = 0; ; i++) {
3840 if (i >= MAX_MIRRORS) {
3841 VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
3842 "cannot create %s", br->name, MAX_MIRRORS, name);
3843 return NULL;
3844 }
3845 if (!br->mirrors[i]) {
3846 break;
3847 }
3848 }
3849
3850 VLOG_INFO("created port mirror %s on bridge %s", name, br->name);
3851 bridge_flush(br);
3852
3853 br->mirrors[i] = m = xzalloc(sizeof *m);
3854 m->bridge = br;
3855 m->idx = i;
3856 m->name = xstrdup(name);
3857 shash_init(&m->src_ports);
3858 shash_init(&m->dst_ports);
3859 m->vlans = NULL;
3860 m->n_vlans = 0;
3861 m->out_vlan = -1;
3862 m->out_port = NULL;
3863
3864 return m;
3865 }
3866
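/* Destroys mirror 'm', if it is nonnull: clears its selection bit from every
 * port on its bridge, frees its selection data, and flushes the bridge. */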
3867 static void
3868 mirror_destroy(struct mirror *m)
3869 {
3870 if (m) {
3871 struct bridge *br = m->bridge;
3872 size_t i;
3873
3874 for (i = 0; i < br->n_ports; i++) {
3875 br->ports[i]->src_mirrors &= ~(MIRROR_MASK_C(1) << m->idx);
3876 br->ports[i]->dst_mirrors &= ~(MIRROR_MASK_C(1) << m->idx);
3877 }
3878
3879 shash_destroy(&m->src_ports);
3880 shash_destroy(&m->dst_ports);
3881 free(m->vlans);
3882
3883 m->bridge->mirrors[m->idx] = NULL;
3884 free(m);
3885
3886 bridge_flush(br);
3887 }
3888 }
3889
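/* Adds to 'names' the names of the 'n_ports' ports in 'ports' that exist on
 * 'm''s bridge, adding each name at most once.  Ports that do not exist on
 * the bridge are logged and skipped. */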
3890 static void
3891 mirror_collect_ports(struct mirror *m, struct ovsrec_port **ports, int n_ports,
3892 struct shash *names)
3893 {
3894 size_t i;
3895
3896 for (i = 0; i < n_ports; i++) {
3897 const char *name = ports[i]->name;
3898 if (port_lookup(m->bridge, name)) {
3899 shash_add_once(names, name, NULL);
3900 } else {
3901 VLOG_WARN("bridge %s: mirror %s cannot match on nonexistent "
3902 "port %s", m->bridge->name, m->name, name);
3903 }
3904 }
3905 }
3906
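/* Stores in '*vlans' a malloc'd array of the valid VLANs selected by 'cfg'
 * and returns the number of VLANs stored.  VLANs outside the range 0 to 4095
 * are logged and skipped.  The caller is responsible for freeing '*vlans'. */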
3907 static size_t
3908 mirror_collect_vlans(struct mirror *m, const struct ovsrec_mirror *cfg,
3909 int **vlans)
3910 {
3911 size_t n_vlans;
3912 size_t i;
3913
3914 *vlans = xmalloc(sizeof **vlans * cfg->n_select_vlan);
3915 n_vlans = 0;
3916 for (i = 0; i < cfg->n_select_vlan; i++) {
3917 int64_t vlan = cfg->select_vlan[i];
3918 if (vlan < 0 || vlan > 4095) {
3919 VLOG_WARN("bridge %s: mirror %s selects invalid VLAN %"PRId64,
3920 m->bridge->name, m->name, vlan);
3921 } else {
3922 (*vlans)[n_vlans++] = vlan;
3923 }
3924 }
3925 return n_vlans;
3926 }
3927
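/* Returns true if 'vlan' is one of the VLANs selected for mirroring by 'm',
 * false otherwise. */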
3928 static bool
3929 vlan_is_mirrored(const struct mirror *m, int vlan)
3930 {
3931 size_t i;
3932
3933 for (i = 0; i < m->n_vlans; i++) {
3934 if (m->vlans[i] == vlan) {
3935 return true;
3936 }
3937 }
3938 return false;
3939 }
3940
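/* Returns true if 'p' trunks at least one of the VLANs selected for
 * mirroring by 'm', false otherwise. */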
3941 static bool
3942 port_trunks_any_mirrored_vlan(const struct mirror *m, const struct port *p)
3943 {
3944 size_t i;
3945
3946 for (i = 0; i < m->n_vlans; i++) {
3947 if (port_trunks_vlan(p, m->vlans[i])) {
3948 return true;
3949 }
3950 }
3951 return false;
3952 }
3953
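/* Reconfigures mirror 'm' to match 'cfg': determines its output port or
 * output VLAN, collects the ports and VLANs that it selects, and updates the
 * mirror bits on every port of its bridge.  Destroys 'm' if 'cfg' does not
 * specify a usable output. */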
3954 static void
3955 mirror_reconfigure_one(struct mirror *m, struct ovsrec_mirror *cfg)
3956 {
3957 struct shash src_ports, dst_ports;
3958 mirror_mask_t mirror_bit;
3959 struct port *out_port;
3960 int out_vlan;
3961 size_t n_vlans;
3962 int *vlans;
3963 size_t i;
3964
3965 /* Get output port. */
3966 if (cfg->output_port) {
3967 out_port = port_lookup(m->bridge, cfg->output_port->name);
3968 if (!out_port) {
3969 VLOG_ERR("bridge %s: mirror %s outputs to port not on bridge",
3970 m->bridge->name, m->name);
3971 mirror_destroy(m);
3972 return;
3973 }
3974 out_vlan = -1;
3975
3976 if (cfg->output_vlan) {
3977 VLOG_ERR("bridge %s: mirror %s specifies both output port and "
3978 "output vlan; ignoring output vlan",
3979 m->bridge->name, m->name);
3980 }
3981 } else if (cfg->output_vlan) {
3982 out_port = NULL;
3983 out_vlan = *cfg->output_vlan;
3984 } else {
3985 VLOG_ERR("bridge %s: mirror %s does not specify output; ignoring",
3986 m->bridge->name, m->name);
3987 mirror_destroy(m);
3988 return;
3989 }
3990
3991 shash_init(&src_ports);
3992 shash_init(&dst_ports);
3993 if (cfg->select_all) {
3994 for (i = 0; i < m->bridge->n_ports; i++) {
3995 const char *name = m->bridge->ports[i]->name;
3996 shash_add_once(&src_ports, name, NULL);
3997 shash_add_once(&dst_ports, name, NULL);
3998 }
3999 vlans = NULL;
4000 n_vlans = 0;
4001 } else {
4002 /* Get ports, and drop duplicates and ports that don't exist. */
4003 mirror_collect_ports(m, cfg->select_src_port, cfg->n_select_src_port,
4004 &src_ports);
4005 mirror_collect_ports(m, cfg->select_dst_port, cfg->n_select_dst_port,
4006 &dst_ports);
4007
4008 /* Get the selected VLANs, dropping any that are invalid. */
4009 n_vlans = mirror_collect_vlans(m, cfg, &vlans);
4010 }
4011
4012 /* Update mirror data. */
4013 if (!shash_equal_keys(&m->src_ports, &src_ports)
4014 || !shash_equal_keys(&m->dst_ports, &dst_ports)
4015 || m->n_vlans != n_vlans
4016 || memcmp(m->vlans, vlans, sizeof *vlans * n_vlans)
4017 || m->out_port != out_port
4018 || m->out_vlan != out_vlan) {
4019 bridge_flush(m->bridge);
4020 }
4021 shash_swap(&m->src_ports, &src_ports);
4022 shash_swap(&m->dst_ports, &dst_ports);
4023 free(m->vlans);
4024 m->vlans = vlans;
4025 m->n_vlans = n_vlans;
4026 m->out_port = out_port;
4027 m->out_vlan = out_vlan;
4028
4029 /* Update ports. */
4030 mirror_bit = MIRROR_MASK_C(1) << m->idx;
4031 for (i = 0; i < m->bridge->n_ports; i++) {
4032 struct port *port = m->bridge->ports[i];
4033
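/* Packets received on 'port' are mirrored if 'port' is selected as a source
 * by name or if the mirror selects VLANs and 'port' carries at least one of
 * them. */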
4034 if (shash_find(&m->src_ports, port->name)
4035 || (m->n_vlans
4036 && (!port->vlan
4037 ? port_trunks_any_mirrored_vlan(m, port)
4038 : vlan_is_mirrored(m, port->vlan)))) {
4039 port->src_mirrors |= mirror_bit;
4040 } else {
4041 port->src_mirrors &= ~mirror_bit;
4042 }
4043
4044 if (shash_find(&m->dst_ports, port->name)) {
4045 port->dst_mirrors |= mirror_bit;
4046 } else {
4047 port->dst_mirrors &= ~mirror_bit;
4048 }
4049 }
4050
4051 /* Clean up. */
4052 shash_destroy(&src_ports);
4053 shash_destroy(&dst_ports);
4054 }