connmgr: Make connmgr_wants_packet_in_on_miss() lock-free.
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <errno.h>
19 #include <stdlib.h>
20
21 #include "bundles.h"
22 #include "connmgr.h"
23 #include "coverage.h"
24 #include "fail-open.h"
25 #include "in-band.h"
26 #include "odp-util.h"
27 #include "ofproto-provider.h"
28 #include "openvswitch/dynamic-string.h"
29 #include "openvswitch/ofp-actions.h"
30 #include "openvswitch/ofp-msgs.h"
31 #include "openvswitch/ofp-util.h"
32 #include "openvswitch/ofpbuf.h"
33 #include "openvswitch/vconn.h"
34 #include "openvswitch/vlog.h"
35 #include "ovs-atomic.h"
36 #include "pinsched.h"
37 #include "poll-loop.h"
38 #include "rconn.h"
39 #include "openvswitch/shash.h"
40 #include "simap.h"
41 #include "stream.h"
42 #include "timeval.h"
43 #include "util.h"
44
45 VLOG_DEFINE_THIS_MODULE(connmgr);
46 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
47
48 /* An OpenFlow connection.
49 *
50 *
51 * Thread-safety
52 * =============
53 *
54 * 'ofproto_mutex' must be held whenever an ofconn is created or destroyed or,
55 * more or less equivalently, whenever an ofconn is added to or removed from a
56 * connmgr. 'ofproto_mutex' doesn't protect the data inside the ofconn, except
57 * as specifically noted below. */
58 struct ofconn {
59 /* Configuration that persists from one connection to the next. */
60
61 struct ovs_list node; /* In struct connmgr's "all_conns" list. */
62 struct hmap_node hmap_node; /* In struct connmgr's "controllers" map. */
63
64 struct connmgr *connmgr; /* Connection's manager. */
65 struct rconn *rconn; /* OpenFlow connection. */
66 enum ofconn_type type; /* Type. */
67 enum ofproto_band band; /* In-band or out-of-band? */
68 bool enable_async_msgs; /* Initially enable async messages? */
69 bool want_packet_in_on_miss;
70
71 /* State that should be cleared from one connection to the next. */
72
73 /* OpenFlow state. */
74 enum ofp12_controller_role role; /* Role. */
75 enum ofputil_protocol protocol; /* Current protocol variant. */
76 enum nx_packet_in_format packet_in_format; /* OFPT_PACKET_IN format. */
77
78 /* OFPT_PACKET_IN related data. */
79 struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
80 #define N_SCHEDULERS 2
81 struct pinsched *schedulers[N_SCHEDULERS];
82 int miss_send_len; /* Bytes to send of buffered packets. */
83 uint16_t controller_id; /* Connection controller ID. */
84
85 /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
86 * requests, and the maximum number before we stop reading OpenFlow
87 * requests. */
88 #define OFCONN_REPLY_MAX 100
89 struct rconn_packet_counter *reply_counter;
90
91 /* Asynchronous message configuration in each possible role.
92 *
93 * A 1-bit enables sending an asynchronous message for one possible reason
94 * that the message might be generated; a 0-bit disables it. */
95 struct ofputil_async_cfg *async_cfg;
96
97 /* Flow table operation logging. */
98 int n_add, n_delete, n_modify; /* Number of unreported ops of each kind. */
99 long long int first_op, last_op; /* Range of times for unreported ops. */
100 long long int next_op_report; /* Time to report ops, or LLONG_MAX. */
101 long long int op_backoff; /* Earliest time to report ops again. */
102
103 /* Flow monitors (e.g. NXST_FLOW_MONITOR). */
104
105 /* Configuration. Contains "struct ofmonitor"s. */
106 struct hmap monitors OVS_GUARDED_BY(ofproto_mutex);
107
108 /* Flow control.
109 *
110 * When too many flow monitor notifications back up in the transmit buffer,
111 * we pause the transmission of further notifications. These members track
112 * the flow control state.
113 *
114 * When notifications are flowing, 'monitor_paused' is 0. When
115 * notifications are paused, 'monitor_paused' is the value of
116 * 'monitor_seqno' at the point we paused.
117 *
118 * 'monitor_counter' counts the OpenFlow messages and bytes currently in
119 * flight. This value growing too large triggers pausing. */
120 uint64_t monitor_paused OVS_GUARDED_BY(ofproto_mutex);
121 struct rconn_packet_counter *monitor_counter OVS_GUARDED_BY(ofproto_mutex);
122
123 /* State of monitors for a single ongoing flow_mod.
124 *
125 * 'updates' is a list of "struct ofpbuf"s that contain
126 * NXST_FLOW_MONITOR_REPLY messages representing the changes made by the
127 * current flow_mod.
128 *
129 * When 'updates' is nonempty, 'sent_abbrev_update' is true if 'updates'
130 * contains an update event of type NXFME_ABBREV and false otherwise. */
131 struct ovs_list updates OVS_GUARDED_BY(ofproto_mutex);
132 bool sent_abbrev_update OVS_GUARDED_BY(ofproto_mutex);
133
134 /* Active bundles. Contains "struct ofp_bundle"s. */
135 struct hmap bundles;
136 long long int next_bundle_expiry_check;
137 };
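
/* A minimal locking sketch for the guarded members above: anything marked
 * OVS_GUARDED_BY(ofproto_mutex), such as 'monitors' and 'updates', may only
 * be touched with that mutex held, e.g.:
 *
 *     ovs_mutex_lock(&ofproto_mutex);
 *     ...read or modify ofconn->monitors...
 *     ovs_mutex_unlock(&ofproto_mutex);
 */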
138
139 /* vswitchd/ovs-vswitchd.8.in documents the value of BUNDLE_IDLE_TIMEOUT in
140 * seconds. That documentation must be kept in sync with the value below. */
141 enum {
142 BUNDLE_EXPIRY_INTERVAL = 1000, /* Check bundle expiry every 1 sec. */
143 BUNDLE_IDLE_TIMEOUT = 10000, /* Expire idle bundles after 10 seconds. */
144 };
145
146 static struct ofconn *ofconn_create(struct connmgr *, struct rconn *,
147 enum ofconn_type, bool enable_async_msgs)
148 OVS_REQUIRES(ofproto_mutex);
149 static void ofconn_destroy(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
150 static void ofconn_flush(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
151
152 static void ofconn_reconfigure(struct ofconn *,
153 const struct ofproto_controller *);
154
155 static void ofconn_run(struct ofconn *,
156 void (*handle_openflow)(struct ofconn *,
157 const struct ofpbuf *ofp_msg));
158 static void ofconn_wait(struct ofconn *);
159
160 static void ofconn_log_flow_mods(struct ofconn *);
161
162 static const char *ofconn_get_target(const struct ofconn *);
163 static char *ofconn_make_name(const struct connmgr *, const char *target);
164
165 static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);
166
167 static void ofconn_send(const struct ofconn *, struct ofpbuf *,
168 struct rconn_packet_counter *);
169
170 static void do_send_packet_ins(struct ofconn *, struct ovs_list *txq);
171
172 /* A listener for incoming OpenFlow "service" connections. */
173 struct ofservice {
174 struct hmap_node node; /* In struct connmgr's "services" hmap. */
175 struct pvconn *pvconn; /* OpenFlow connection listener. */
176
177 /* These are not used by ofservice directly. They are settings for
178 * accepted "struct ofconn"s from the pvconn. */
179 int probe_interval; /* Max idle time before probing, in seconds. */
180 int rate_limit; /* Max packet-in rate in packets per second. */
181 int burst_limit; /* Limit on accumulating packet credits. */
182 bool enable_async_msgs; /* Initially enable async messages? */
183 uint8_t dscp; /* DSCP value for controller connection. */
184 uint32_t allowed_versions; /* OpenFlow protocol versions that may
185 * be negotiated for a session. */
186 };
187
188 static void ofservice_reconfigure(struct ofservice *,
189 const struct ofproto_controller *);
190 static int ofservice_create(struct connmgr *mgr, const char *target,
191 uint32_t allowed_versions, uint8_t dscp);
192 static void ofservice_destroy(struct connmgr *, struct ofservice *);
193 static struct ofservice *ofservice_lookup(struct connmgr *,
194 const char *target);
195
196 /* Connection manager for an OpenFlow switch. */
197 struct connmgr {
198 struct ofproto *ofproto;
199 char *name;
200 char *local_port_name;
201
202 /* OpenFlow connections. */
203 struct hmap controllers; /* All OFCONN_PRIMARY controllers. */
204 struct ovs_list all_conns; /* All controllers. All modifications are
205 protected by ofproto_mutex, so that any
206 traversals from other threads can be made
207 safe by holding the ofproto_mutex. */
208 uint64_t master_election_id; /* monotonically increasing sequence number
209 * for master election */
210 bool master_election_id_defined;
211
212 /* OpenFlow listeners. */
213 struct hmap services; /* Contains "struct ofservice"s. */
214 struct pvconn **snoops;
215 size_t n_snoops;
216
217 /* Fail open. */
218 struct fail_open *fail_open;
219 enum ofproto_fail_mode fail_mode;
220
221 /* In-band control. */
222 struct in_band *in_band;
223 struct sockaddr_in *extra_in_band_remotes;
224 size_t n_extra_remotes;
225 int in_band_queue;
226
227 ATOMIC(int) want_packet_in_on_miss; /* Sum of ofconns' values. */
228 };
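
/* 'want_packet_in_on_miss' is kept equal to the number of ofconns whose own
 * 'want_packet_in_on_miss' flag is set (see update_want_packet_in_on_miss()
 * below), so readers such as connmgr_wants_packet_in_on_miss() can consult it
 * with a relaxed atomic read instead of taking 'ofproto_mutex'. */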
229
230 static void update_in_band_remotes(struct connmgr *);
231 static void add_snooper(struct connmgr *, struct vconn *);
232 static void ofmonitor_run(struct connmgr *);
233 static void ofmonitor_wait(struct connmgr *);
234
235 /* Creates and returns a new connection manager owned by 'ofproto'. 'name' is
236 * a name for the ofproto suitable for using in log messages.
237 * 'local_port_name' is the name of the local port (OFPP_LOCAL) within
238 * 'ofproto'. */
239 struct connmgr *
240 connmgr_create(struct ofproto *ofproto,
241 const char *name, const char *local_port_name)
242 {
243 struct connmgr *mgr;
244
245 mgr = xmalloc(sizeof *mgr);
246 mgr->ofproto = ofproto;
247 mgr->name = xstrdup(name);
248 mgr->local_port_name = xstrdup(local_port_name);
249
250 hmap_init(&mgr->controllers);
251 ovs_list_init(&mgr->all_conns);
252 mgr->master_election_id = 0;
253 mgr->master_election_id_defined = false;
254
255 hmap_init(&mgr->services);
256 mgr->snoops = NULL;
257 mgr->n_snoops = 0;
258
259 mgr->fail_open = NULL;
260 mgr->fail_mode = OFPROTO_FAIL_SECURE;
261
262 mgr->in_band = NULL;
263 mgr->extra_in_band_remotes = NULL;
264 mgr->n_extra_remotes = 0;
265 mgr->in_band_queue = -1;
266
267 atomic_init(&mgr->want_packet_in_on_miss, 0);
268
269 return mgr;
270 }
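
/* Illustrative sketch (names are hypothetical): an owner typically pairs
 * connmgr_create() with connmgr_destroy(), and the latter requires
 * 'ofproto_mutex':
 *
 *     struct connmgr *mgr = connmgr_create(ofproto, "br0", "br0");
 *     ...
 *     ovs_mutex_lock(&ofproto_mutex);
 *     connmgr_destroy(mgr);
 *     ovs_mutex_unlock(&ofproto_mutex);
 */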
271
272 /* The default "table-miss" behaviour for OpenFlow 1.3+ is to drop the
273 * packet rather than to send the packet to the controller.
274 *
275 * This function maintains the count of pre-OpenFlow 1.3 connections with
276 * controller_id 0, as we assume these are the controllers that should
277 * receive "table-miss" notifications. */
278 static void
279 update_want_packet_in_on_miss(struct ofconn *ofconn)
280 {
281 /* We want a packet-in on miss when controller_id is zero and OpenFlow is
282 * lower than version 1.3. */
283 enum ofputil_protocol p = ofconn->protocol;
284 int new_want = (ofconn->controller_id == 0 &&
285 (p == OFPUTIL_P_NONE ||
286 ofputil_protocol_to_ofp_version(p) < OFP13_VERSION));
287
288 /* Update the setting and the count if necessary. */
289 int old_want = ofconn->want_packet_in_on_miss;
290 if (old_want != new_want) {
291 atomic_int *dst = &ofconn->connmgr->want_packet_in_on_miss;
292 int count;
293 atomic_read_relaxed(dst, &count);
294 atomic_store_relaxed(dst, count - old_want + new_want);
295
296 ofconn->want_packet_in_on_miss = new_want;
297 }
298 }
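
/* Worked example of the bookkeeping above: an ofconn with controller_id 0
 * that negotiates OpenFlow 1.0 has new_want == 1, so the connmgr-wide count
 * is incremented; if ofconn_set_controller_id() later changes the ID to a
 * nonzero value, new_want drops back to 0 and the count is decremented.
 * The count therefore always equals the number of connections that still
 * want "table-miss" packet-ins. */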
299
300 /* Frees 'mgr' and all of its resources. */
301 void
302 connmgr_destroy(struct connmgr *mgr)
303 OVS_REQUIRES(ofproto_mutex)
304 {
305 struct ofservice *ofservice, *next_ofservice;
306 struct ofconn *ofconn, *next_ofconn;
307 size_t i;
308
309 if (!mgr) {
310 return;
311 }
312
313 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
314 ofconn_destroy(ofconn);
315 }
316
317 hmap_destroy(&mgr->controllers);
318
319 HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &mgr->services) {
320 ofservice_destroy(mgr, ofservice);
321 }
322 hmap_destroy(&mgr->services);
323
324 for (i = 0; i < mgr->n_snoops; i++) {
325 pvconn_close(mgr->snoops[i]);
326 }
327 free(mgr->snoops);
328
329 fail_open_destroy(mgr->fail_open);
330 mgr->fail_open = NULL;
331
332 in_band_destroy(mgr->in_band);
333 mgr->in_band = NULL;
334 free(mgr->extra_in_band_remotes);
335 free(mgr->name);
336 free(mgr->local_port_name);
337
338 free(mgr);
339 }
340
341 /* Does all of the periodic maintenance required by 'mgr'. Calls
342 * 'handle_openflow' for each message received on an OpenFlow connection,
343 * passing along the OpenFlow connection itself and the message that was sent.
344 * 'handle_openflow' must not modify or free the message. */
345 void
346 connmgr_run(struct connmgr *mgr,
347 void (*handle_openflow)(struct ofconn *,
348 const struct ofpbuf *ofp_msg))
349 OVS_EXCLUDED(ofproto_mutex)
350 {
351 struct ofconn *ofconn, *next_ofconn;
352 struct ofservice *ofservice;
353 size_t i;
354
355 if (mgr->in_band) {
356 if (!in_band_run(mgr->in_band)) {
357 in_band_destroy(mgr->in_band);
358 mgr->in_band = NULL;
359 }
360 }
361
362 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
363 ofconn_run(ofconn, handle_openflow);
364 }
365 ofmonitor_run(mgr);
366
367 /* Fail-open maintenance. Do this after processing the ofconns since
368 * fail-open checks the status of the controller rconn. */
369 if (mgr->fail_open) {
370 fail_open_run(mgr->fail_open);
371 }
372
373 HMAP_FOR_EACH (ofservice, node, &mgr->services) {
374 struct vconn *vconn;
375 int retval;
376
377 retval = pvconn_accept(ofservice->pvconn, &vconn);
378 if (!retval) {
379 struct rconn *rconn;
380 char *name;
381
382 /* Pass default values for creation of the rconn. */
383 rconn = rconn_create(ofservice->probe_interval, 0, ofservice->dscp,
384 vconn_get_allowed_versions(vconn));
385 name = ofconn_make_name(mgr, vconn_get_name(vconn));
386 rconn_connect_unreliably(rconn, vconn, name);
387 free(name);
388
389 ovs_mutex_lock(&ofproto_mutex);
390 ofconn = ofconn_create(mgr, rconn, OFCONN_SERVICE,
391 ofservice->enable_async_msgs);
392 ovs_mutex_unlock(&ofproto_mutex);
393
394 ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
395 ofservice->burst_limit);
396 } else if (retval != EAGAIN) {
397 VLOG_WARN_RL(&rl, "accept failed (%s)", ovs_strerror(retval));
398 }
399 }
400
401 for (i = 0; i < mgr->n_snoops; i++) {
402 struct vconn *vconn;
403 int retval;
404
405 retval = pvconn_accept(mgr->snoops[i], &vconn);
406 if (!retval) {
407 add_snooper(mgr, vconn);
408 } else if (retval != EAGAIN) {
409 VLOG_WARN_RL(&rl, "accept failed (%s)", ovs_strerror(retval));
410 }
411 }
412 }
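
/* Illustrative sketch of the intended calling pattern (the handler name is
 * hypothetical): the owner runs the connmgr once per poll-loop iteration,
 * without holding 'ofproto_mutex', then registers its wakeup events:
 *
 *     for (;;) {
 *         connmgr_run(mgr, handle_openflow_cb);
 *         connmgr_wait(mgr);
 *         poll_block();
 *     }
 */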
413
414 /* Causes the poll loop to wake up when connmgr_run() needs to run. */
415 void
416 connmgr_wait(struct connmgr *mgr)
417 {
418 struct ofservice *ofservice;
419 struct ofconn *ofconn;
420 size_t i;
421
422 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
423 ofconn_wait(ofconn);
424 }
425 ofmonitor_wait(mgr);
426 if (mgr->in_band) {
427 in_band_wait(mgr->in_band);
428 }
429 if (mgr->fail_open) {
430 fail_open_wait(mgr->fail_open);
431 }
432 HMAP_FOR_EACH (ofservice, node, &mgr->services) {
433 pvconn_wait(ofservice->pvconn);
434 }
435 for (i = 0; i < mgr->n_snoops; i++) {
436 pvconn_wait(mgr->snoops[i]);
437 }
438 }
439
440 /* Adds some memory usage statistics for 'mgr' into 'usage', for use with
441 * memory_report(). */
442 void
443 connmgr_get_memory_usage(const struct connmgr *mgr, struct simap *usage)
444 {
445 const struct ofconn *ofconn;
446 unsigned int packets = 0;
447 unsigned int ofconns = 0;
448
449 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
450 int i;
451
452 ofconns++;
453
454 packets += rconn_count_txqlen(ofconn->rconn);
455 for (i = 0; i < N_SCHEDULERS; i++) {
456 struct pinsched_stats stats;
457
458 pinsched_get_stats(ofconn->schedulers[i], &stats);
459 packets += stats.n_queued;
460 }
461 }
462 simap_increase(usage, "ofconns", ofconns);
463 simap_increase(usage, "packets", packets);
464 }
465
466 /* Returns the ofproto that owns 'ofconn''s connmgr. */
467 struct ofproto *
468 ofconn_get_ofproto(const struct ofconn *ofconn)
469 {
470 return ofconn->connmgr->ofproto;
471 }
472 \f
473 /* OpenFlow configuration. */
474
475 static void add_controller(struct connmgr *, const char *target, uint8_t dscp,
476 uint32_t allowed_versions)
477 OVS_REQUIRES(ofproto_mutex);
478 static struct ofconn *find_controller_by_target(struct connmgr *,
479 const char *target);
480 static void update_fail_open(struct connmgr *) OVS_EXCLUDED(ofproto_mutex);
481 static int set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
482 const struct sset *);
483
484 /* Returns true if 'mgr' has any configured primary controllers.
485 *
486 * Service controllers do not count, but configured primary controllers do
487 * count whether or not they are currently connected. */
488 bool
489 connmgr_has_controllers(const struct connmgr *mgr)
490 {
491 return !hmap_is_empty(&mgr->controllers);
492 }
493
494 /* Initializes 'info' and populates it with information about each configured
495 * primary controller. The keys in 'info' are the controllers' targets; the
496 * data values are corresponding "struct ofproto_controller_info".
497 *
498 * The caller owns 'info' and everything in it and should free it when it is no
499 * longer needed. */
500 void
501 connmgr_get_controller_info(struct connmgr *mgr, struct shash *info)
502 {
503 const struct ofconn *ofconn;
504
505 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
506 const struct rconn *rconn = ofconn->rconn;
507 const char *target = rconn_get_target(rconn);
508
509 if (!shash_find(info, target)) {
510 struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo);
511 time_t now = time_now();
512 time_t last_connection = rconn_get_last_connection(rconn);
513 time_t last_disconnect = rconn_get_last_disconnect(rconn);
514 int last_error = rconn_get_last_error(rconn);
515 int i;
516
517 shash_add(info, target, cinfo);
518
519 cinfo->is_connected = rconn_is_connected(rconn);
520 cinfo->role = ofconn->role;
521
522 smap_init(&cinfo->pairs);
523 if (last_error) {
524 smap_add(&cinfo->pairs, "last_error",
525 ovs_retval_to_string(last_error));
526 }
527
528 smap_add(&cinfo->pairs, "state", rconn_get_state(rconn));
529
530 if (last_connection != TIME_MIN) {
531 smap_add_format(&cinfo->pairs, "sec_since_connect",
532 "%ld", (long int) (now - last_connection));
533 }
534
535 if (last_disconnect != TIME_MIN) {
536 smap_add_format(&cinfo->pairs, "sec_since_disconnect",
537 "%ld", (long int) (now - last_disconnect));
538 }
539
540 for (i = 0; i < N_SCHEDULERS; i++) {
541 if (ofconn->schedulers[i]) {
542 const char *name = i ? "miss" : "action";
543 struct pinsched_stats stats;
544
545 pinsched_get_stats(ofconn->schedulers[i], &stats);
546 smap_add_nocopy(&cinfo->pairs,
547 xasprintf("packet-in-%s-backlog", name),
548 xasprintf("%u", stats.n_queued));
549 smap_add_nocopy(&cinfo->pairs,
550 xasprintf("packet-in-%s-bypassed", name),
551 xasprintf("%llu", stats.n_normal));
552 smap_add_nocopy(&cinfo->pairs,
553 xasprintf("packet-in-%s-queued", name),
554 xasprintf("%llu", stats.n_limited));
555 smap_add_nocopy(&cinfo->pairs,
556 xasprintf("packet-in-%s-dropped", name),
557 xasprintf("%llu", stats.n_queue_dropped));
558 }
559 }
560 }
561 }
562 }
563
564 void
565 connmgr_free_controller_info(struct shash *info)
566 {
567 struct shash_node *node;
568
569 SHASH_FOR_EACH (node, info) {
570 struct ofproto_controller_info *cinfo = node->data;
571 smap_destroy(&cinfo->pairs);
572 free(cinfo);
573 }
574 shash_destroy(info);
575 }
576
577 /* Changes 'mgr''s set of controllers to the 'n_controllers' controllers in
578 * 'controllers'. */
579 void
580 connmgr_set_controllers(struct connmgr *mgr,
581 const struct ofproto_controller *controllers,
582 size_t n_controllers, uint32_t allowed_versions)
583 OVS_EXCLUDED(ofproto_mutex)
584 {
585 bool had_controllers = connmgr_has_controllers(mgr);
586 struct shash new_controllers;
587 struct ofconn *ofconn, *next_ofconn;
588 struct ofservice *ofservice, *next_ofservice;
589 size_t i;
590
591 /* Required to add and remove ofconns. This could probably be narrowed to
592 * cover a smaller amount of code, if that yielded some benefit. */
593 ovs_mutex_lock(&ofproto_mutex);
594
595 /* Create newly configured controllers and services.
596 * Create a name to ofproto_controller mapping in 'new_controllers'. */
597 shash_init(&new_controllers);
598 for (i = 0; i < n_controllers; i++) {
599 const struct ofproto_controller *c = &controllers[i];
600
601 if (!vconn_verify_name(c->target)) {
602 bool add = false;
603 ofconn = find_controller_by_target(mgr, c->target);
604 if (!ofconn) {
605 VLOG_INFO("%s: added primary controller \"%s\"",
606 mgr->name, c->target);
607 add = true;
608 } else if (rconn_get_allowed_versions(ofconn->rconn) !=
609 allowed_versions) {
610 VLOG_INFO("%s: re-added primary controller \"%s\"",
611 mgr->name, c->target);
612 add = true;
613 ofconn_destroy(ofconn);
614 }
615 if (add) {
616 add_controller(mgr, c->target, c->dscp, allowed_versions);
617 }
618 } else if (!pvconn_verify_name(c->target)) {
619 bool add = false;
620 ofservice = ofservice_lookup(mgr, c->target);
621 if (!ofservice) {
622 VLOG_INFO("%s: added service controller \"%s\"",
623 mgr->name, c->target);
624 add = true;
625 } else if (ofservice->allowed_versions != allowed_versions) {
626 VLOG_INFO("%s: re-added service controller \"%s\"",
627 mgr->name, c->target);
628 ofservice_destroy(mgr, ofservice);
629 add = true;
630 }
631 if (add) {
632 ofservice_create(mgr, c->target, allowed_versions, c->dscp);
633 }
634 } else {
635 VLOG_WARN_RL(&rl, "%s: unsupported controller \"%s\"",
636 mgr->name, c->target);
637 continue;
638 }
639
640 shash_add_once(&new_controllers, c->target, &controllers[i]);
641 }
642
643 /* Delete controllers that are no longer configured.
644 * Update configuration of all now-existing controllers. */
645 HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &mgr->controllers) {
646 const char *target = ofconn_get_target(ofconn);
647 struct ofproto_controller *c;
648
649 c = shash_find_data(&new_controllers, target);
650 if (!c) {
651 VLOG_INFO("%s: removed primary controller \"%s\"",
652 mgr->name, target);
653 ofconn_destroy(ofconn);
654 } else {
655 ofconn_reconfigure(ofconn, c);
656 }
657 }
658
659 /* Delete services that are no longer configured.
660 * Update configuration of all now-existing services. */
661 HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &mgr->services) {
662 const char *target = pvconn_get_name(ofservice->pvconn);
663 struct ofproto_controller *c;
664
665 c = shash_find_data(&new_controllers, target);
666 if (!c) {
667 VLOG_INFO("%s: removed service controller \"%s\"",
668 mgr->name, target);
669 ofservice_destroy(mgr, ofservice);
670 } else {
671 ofservice_reconfigure(ofservice, c);
672 }
673 }
674
675 shash_destroy(&new_controllers);
676
677 ovs_mutex_unlock(&ofproto_mutex);
678
679 update_in_band_remotes(mgr);
680 update_fail_open(mgr);
681 if (had_controllers != connmgr_has_controllers(mgr)) {
682 ofproto_flush_flows(mgr->ofproto);
683 }
684 }
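
/* Illustrative sketch (values are made up): a caller builds an array of
 * "struct ofproto_controller" and hands it over in one call.  Targets that
 * parse as active connection names (e.g. "tcp:...") become primary
 * controllers; passive names (e.g. "ptcp:...") become service listeners.
 * The version bitmap shown assumes the usual convention of one bit per
 * OFP*_VERSION:
 *
 *     struct ofproto_controller c = {
 *         .target = "tcp:192.0.2.1:6653",
 *         .max_backoff = 8,
 *         .probe_interval = 5,
 *         .band = OFPROTO_OUT_OF_BAND,
 *         .enable_async_msgs = true,
 *         .dscp = 0,
 *     };
 *     connmgr_set_controllers(mgr, &c, 1, 1u << OFP13_VERSION);
 */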
685
686 /* Drops the connections between 'mgr' and all of its primary and secondary
687 * controllers, forcing them to reconnect. */
688 void
689 connmgr_reconnect(const struct connmgr *mgr)
690 {
691 struct ofconn *ofconn;
692
693 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
694 rconn_reconnect(ofconn->rconn);
695 }
696 }
697
698 /* Sets the "snoops" for 'mgr' to the pvconn targets listed in 'snoops'.
699 *
700 * A "snoop" is a pvconn to which every OpenFlow message to or from the most
701 * important controller on 'mgr' is mirrored. */
702 int
703 connmgr_set_snoops(struct connmgr *mgr, const struct sset *snoops)
704 {
705 return set_pvconns(&mgr->snoops, &mgr->n_snoops, snoops);
706 }
707
708 /* Adds each of the snoops currently configured on 'mgr' to 'snoops'. */
709 void
710 connmgr_get_snoops(const struct connmgr *mgr, struct sset *snoops)
711 {
712 size_t i;
713
714 for (i = 0; i < mgr->n_snoops; i++) {
715 sset_add(snoops, pvconn_get_name(mgr->snoops[i]));
716 }
717 }
718
719 /* Returns true if 'mgr' has at least one snoop, false if it has none. */
720 bool
721 connmgr_has_snoops(const struct connmgr *mgr)
722 {
723 return mgr->n_snoops > 0;
724 }
725
726 /* Creates a new controller for 'target' in 'mgr'. update_controller() needs
727 * to be called later to finish the new ofconn's configuration. */
728 static void
729 add_controller(struct connmgr *mgr, const char *target, uint8_t dscp,
730 uint32_t allowed_versions)
731 OVS_REQUIRES(ofproto_mutex)
732 {
733 char *name = ofconn_make_name(mgr, target);
734 struct ofconn *ofconn;
735
736 ofconn = ofconn_create(mgr, rconn_create(5, 8, dscp, allowed_versions),
737 OFCONN_PRIMARY, true);
738 rconn_connect(ofconn->rconn, target, name);
739 hmap_insert(&mgr->controllers, &ofconn->hmap_node, hash_string(target, 0));
740
741 free(name);
742 }
743
744 static struct ofconn *
745 find_controller_by_target(struct connmgr *mgr, const char *target)
746 {
747 struct ofconn *ofconn;
748
749 HMAP_FOR_EACH_WITH_HASH (ofconn, hmap_node,
750 hash_string(target, 0), &mgr->controllers) {
751 if (!strcmp(ofconn_get_target(ofconn), target)) {
752 return ofconn;
753 }
754 }
755 return NULL;
756 }
757
758 static void
759 update_in_band_remotes(struct connmgr *mgr)
760 {
761 struct sockaddr_in *addrs;
762 size_t max_addrs, n_addrs;
763 struct ofconn *ofconn;
764 size_t i;
765
766 /* Allocate enough memory for as many remotes as we could possibly have. */
767 max_addrs = mgr->n_extra_remotes + hmap_count(&mgr->controllers);
768 addrs = xmalloc(max_addrs * sizeof *addrs);
769 n_addrs = 0;
770
771 /* Add all the remotes. */
772 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
773 const char *target = rconn_get_target(ofconn->rconn);
774 union {
775 struct sockaddr_storage ss;
776 struct sockaddr_in in;
777 } sa;
778
779 if (ofconn->band == OFPROTO_IN_BAND
780 && stream_parse_target_with_default_port(target, OFP_PORT, &sa.ss)
781 && sa.ss.ss_family == AF_INET) {
782 addrs[n_addrs++] = sa.in;
783 }
784 }
785 for (i = 0; i < mgr->n_extra_remotes; i++) {
786 addrs[n_addrs++] = mgr->extra_in_band_remotes[i];
787 }
788
789 /* Create or update or destroy in-band. */
790 if (n_addrs) {
791 if (!mgr->in_band) {
792 in_band_create(mgr->ofproto, mgr->local_port_name, &mgr->in_band);
793 }
794 in_band_set_queue(mgr->in_band, mgr->in_band_queue);
795 } else {
796 /* in_band_run() needs a chance to delete any existing in-band flows.
797 * We will destroy mgr->in_band after it's done with that. */
798 }
799 if (mgr->in_band) {
800 in_band_set_remotes(mgr->in_band, addrs, n_addrs);
801 }
802
803 /* Clean up. */
804 free(addrs);
805 }
806
807 static void
808 update_fail_open(struct connmgr *mgr)
809 OVS_EXCLUDED(ofproto_mutex)
810 {
811 if (connmgr_has_controllers(mgr)
812 && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
813 if (!mgr->fail_open) {
814 mgr->fail_open = fail_open_create(mgr->ofproto, mgr);
815 }
816 } else {
817 ovs_mutex_lock(&ofproto_mutex);
818 fail_open_destroy(mgr->fail_open);
819 ovs_mutex_unlock(&ofproto_mutex);
820 mgr->fail_open = NULL;
821 }
822 }
823
824 static int
825 set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
826 const struct sset *sset)
827 {
828 struct pvconn **pvconns = *pvconnsp;
829 size_t n_pvconns = *n_pvconnsp;
830 const char *name;
831 int retval = 0;
832 size_t i;
833
834 for (i = 0; i < n_pvconns; i++) {
835 pvconn_close(pvconns[i]);
836 }
837 free(pvconns);
838
839 pvconns = xmalloc(sset_count(sset) * sizeof *pvconns);
840 n_pvconns = 0;
841 SSET_FOR_EACH (name, sset) {
842 struct pvconn *pvconn;
843 int error;
844 error = pvconn_open(name, 0, 0, &pvconn);
845 if (!error) {
846 pvconns[n_pvconns++] = pvconn;
847 } else {
848 VLOG_ERR("failed to listen on %s: %s", name, ovs_strerror(error));
849 if (!retval) {
850 retval = error;
851 }
852 }
853 }
854
855 *pvconnsp = pvconns;
856 *n_pvconnsp = n_pvconns;
857
858 return retval;
859 }
860
861 /* Returns a "preference level" for snooping 'ofconn'. A higher return value
862 * means that 'ofconn' is more interesting for monitoring than a lower return
863 * value. */
864 static int
865 snoop_preference(const struct ofconn *ofconn)
866 {
867 switch (ofconn->role) {
868 case OFPCR12_ROLE_MASTER:
869 return 3;
870 case OFPCR12_ROLE_EQUAL:
871 return 2;
872 case OFPCR12_ROLE_SLAVE:
873 return 1;
874 case OFPCR12_ROLE_NOCHANGE:
875 default:
876 /* Shouldn't happen. */
877 return 0;
878 }
879 }
880
881 /* One of 'mgr''s "snoop" pvconns has accepted a new connection on 'vconn'.
882 * Connects this vconn to a controller. */
883 static void
884 add_snooper(struct connmgr *mgr, struct vconn *vconn)
885 {
886 struct ofconn *ofconn, *best;
887
888 /* Pick a controller for monitoring. */
889 best = NULL;
890 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
891 if (ofconn->type == OFCONN_PRIMARY
892 && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
893 best = ofconn;
894 }
895 }
896
897 if (best) {
898 rconn_add_monitor(best->rconn, vconn);
899 } else {
900 VLOG_INFO_RL(&rl, "no controller connection to snoop");
901 vconn_close(vconn);
902 }
903 }
904 \f
905 /* Public ofconn functions. */
906
907 /* Returns the connection type, either OFCONN_PRIMARY or OFCONN_SERVICE. */
908 enum ofconn_type
909 ofconn_get_type(const struct ofconn *ofconn)
910 {
911 return ofconn->type;
912 }
913
914 /* If a master election id is defined, stores it into '*idp' and returns
915 * true. Otherwise, stores UINT64_MAX into '*idp' and returns false. */
916 bool
917 ofconn_get_master_election_id(const struct ofconn *ofconn, uint64_t *idp)
918 {
919 *idp = (ofconn->connmgr->master_election_id_defined
920 ? ofconn->connmgr->master_election_id
921 : UINT64_MAX);
922 return ofconn->connmgr->master_election_id_defined;
923 }
924
925 /* Sets the master election id.
926 *
927 * Returns true if successful, false if the id is stale
928 */
929 bool
930 ofconn_set_master_election_id(struct ofconn *ofconn, uint64_t id)
931 {
932 if (ofconn->connmgr->master_election_id_defined
933 &&
934 /* Unsigned difference interpreted as a two's complement signed
935 * value */
936 (int64_t)(id - ofconn->connmgr->master_election_id) < 0) {
937 return false;
938 }
939 ofconn->connmgr->master_election_id = id;
940 ofconn->connmgr->master_election_id_defined = true;
941
942 return true;
943 }
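
/* Worked example of the staleness check above: with the stored election id
 * at 10, an incoming id of 7 gives (int64_t)(7 - 10) == -3 < 0 and is
 * rejected as stale, while an id of 12 gives +2 and is accepted.  The
 * unsigned subtraction also handles wraparound: with the stored id at
 * UINT64_MAX, an incoming id of 1 gives (int64_t)2 > 0 and is accepted. */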
944
945 /* Returns the role configured for 'ofconn'.
946 *
947 * The default role, if no other role has been set, is OFPCR12_ROLE_EQUAL. */
948 enum ofp12_controller_role
949 ofconn_get_role(const struct ofconn *ofconn)
950 {
951 return ofconn->role;
952 }
953
954 void
955 ofconn_send_role_status(struct ofconn *ofconn, uint32_t role, uint8_t reason)
956 {
957 struct ofputil_role_status status;
958 struct ofpbuf *buf;
959
960 status.reason = reason;
961 status.role = role;
962 ofconn_get_master_election_id(ofconn, &status.generation_id);
963
964 buf = ofputil_encode_role_status(&status, ofconn_get_protocol(ofconn));
965 if (buf) {
966 ofconn_send(ofconn, buf, NULL);
967 }
968 }
969
970 /* Changes 'ofconn''s role to 'role'. If 'role' is OFPCR12_ROLE_MASTER then
971 * any existing master is demoted to a slave. */
972 void
973 ofconn_set_role(struct ofconn *ofconn, enum ofp12_controller_role role)
974 {
975 if (role != ofconn->role && role == OFPCR12_ROLE_MASTER) {
976 struct ofconn *other;
977
978 LIST_FOR_EACH (other, node, &ofconn->connmgr->all_conns) {
979 if (other->role == OFPCR12_ROLE_MASTER) {
980 other->role = OFPCR12_ROLE_SLAVE;
981 ofconn_send_role_status(other, OFPCR12_ROLE_SLAVE, OFPCRR_MASTER_REQUEST);
982 }
983 }
984 }
985 ofconn->role = role;
986 }
987
988 void
989 ofconn_set_invalid_ttl_to_controller(struct ofconn *ofconn, bool enable)
990 {
991 struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
992 uint32_t bit = 1u << OFPR_INVALID_TTL;
993 if (enable) {
994 ac.master[OAM_PACKET_IN] |= bit;
995 } else {
996 ac.master[OAM_PACKET_IN] &= ~bit;
997 }
998 ofconn_set_async_config(ofconn, &ac);
999 }
1000
1001 bool
1002 ofconn_get_invalid_ttl_to_controller(struct ofconn *ofconn)
1003 {
1004 struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
1005 uint32_t bit = 1u << OFPR_INVALID_TTL;
1006 return (ac.master[OAM_PACKET_IN] & bit) != 0;
1007 }
1008
1009 /* Returns the currently configured protocol for 'ofconn', one of OFPUTIL_P_*.
1010 *
1011 * Returns OFPUTIL_P_NONE, which is not a valid protocol, if 'ofconn' hasn't
1012 * completed version negotiation. This can't happen if at least one OpenFlow
1013 * message, other than OFPT_HELLO, has been received on the connection (such as
1014 * in ofproto.c's message handling code), since version negotiation is a
1015 * prerequisite for starting to receive messages. This means that
1016 * OFPUTIL_P_NONE is a special case that most callers need not worry about. */
1017 enum ofputil_protocol
1018 ofconn_get_protocol(const struct ofconn *ofconn)
1019 {
1020 if (ofconn->protocol == OFPUTIL_P_NONE &&
1021 rconn_is_connected(ofconn->rconn)) {
1022 int version = rconn_get_version(ofconn->rconn);
1023 if (version > 0) {
1024 ofconn_set_protocol(CONST_CAST(struct ofconn *, ofconn),
1025 ofputil_protocol_from_ofp_version(version));
1026 }
1027 }
1028
1029 return ofconn->protocol;
1030 }
1031
1032 /* Sets the protocol for 'ofconn' to 'protocol' (one of OFPUTIL_P_*).
1033 *
1034 * (This doesn't actually send anything to accomplish this. Presumably the
1035 * caller already did that.) */
1036 void
1037 ofconn_set_protocol(struct ofconn *ofconn, enum ofputil_protocol protocol)
1038 {
1039 ofconn->protocol = protocol;
1040 update_want_packet_in_on_miss(ofconn);
1041 }
1042
1043 /* Returns the currently configured packet in format for 'ofconn', one of
1044 * NXPIF_*.
1045 *
1046 * The default, if no other format has been set, is NXPIF_STANDARD. */
1047 enum nx_packet_in_format
1048 ofconn_get_packet_in_format(struct ofconn *ofconn)
1049 {
1050 return ofconn->packet_in_format;
1051 }
1052
1053 /* Sets the packet in format for 'ofconn' to 'packet_in_format' (one of
1054 * NXPIF_*). */
1055 void
1056 ofconn_set_packet_in_format(struct ofconn *ofconn,
1057 enum nx_packet_in_format packet_in_format)
1058 {
1059 ofconn->packet_in_format = packet_in_format;
1060 }
1061
1062 /* Sets the controller connection ID for 'ofconn' to 'controller_id'.
1063 *
1064 * The connection controller ID is used for OFPP_CONTROLLER and
1065 * NXAST_CONTROLLER actions. See "struct nx_action_controller" for details. */
1066 void
1067 ofconn_set_controller_id(struct ofconn *ofconn, uint16_t controller_id)
1068 {
1069 ofconn->controller_id = controller_id;
1070 update_want_packet_in_on_miss(ofconn);
1071 }
1072
1073 /* Returns the default miss send length for 'ofconn'. */
1074 int
1075 ofconn_get_miss_send_len(const struct ofconn *ofconn)
1076 {
1077 return ofconn->miss_send_len;
1078 }
1079
1080 /* Sets the default miss send length for 'ofconn' to 'miss_send_len'. */
1081 void
1082 ofconn_set_miss_send_len(struct ofconn *ofconn, int miss_send_len)
1083 {
1084 ofconn->miss_send_len = miss_send_len;
1085 }
1086
1087 void
1088 ofconn_set_async_config(struct ofconn *ofconn,
1089 const struct ofputil_async_cfg *ac)
1090 {
1091 if (!ofconn->async_cfg) {
1092 ofconn->async_cfg = xmalloc(sizeof *ofconn->async_cfg);
1093 }
1094 *ofconn->async_cfg = *ac;
1095 }
1096
1097 struct ofputil_async_cfg
1098 ofconn_get_async_config(const struct ofconn *ofconn)
1099 {
1100 if (ofconn->async_cfg) {
1101 return *ofconn->async_cfg;
1102 }
1103
1104 int version = rconn_get_version(ofconn->rconn);
1105 return (version < 0 || !ofconn->enable_async_msgs
1106 ? OFPUTIL_ASYNC_CFG_INIT
1107 : ofputil_async_cfg_default(version));
1108 }
1109
1110 /* Sends 'msg' on 'ofconn', accounting it as a reply. (If there is a
1111 * sufficient number of OpenFlow replies in-flight on a single ofconn, then the
1112 * connmgr will stop accepting new OpenFlow requests on that ofconn until the
1113 * controller has accepted some of the replies.) */
1114 void
1115 ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg)
1116 {
1117 ofconn_send(ofconn, msg, ofconn->reply_counter);
1118 }
1119
1120 /* Sends each of the messages in list 'replies' on 'ofconn' in order,
1121 * accounting them as replies. */
1122 void
1123 ofconn_send_replies(const struct ofconn *ofconn, struct ovs_list *replies)
1124 {
1125 struct ofpbuf *reply;
1126
1127 LIST_FOR_EACH_POP (reply, list_node, replies) {
1128 ofconn_send_reply(ofconn, reply);
1129 }
1130 }
1131
1132 /* Sends 'error' on 'ofconn', as a reply to 'request'. At most the
1133 * first 64 bytes of 'request' are used. */
1134 void
1135 ofconn_send_error(const struct ofconn *ofconn,
1136 const struct ofp_header *request, enum ofperr error)
1137 {
1138 static struct vlog_rate_limit err_rl = VLOG_RATE_LIMIT_INIT(10, 10);
1139 struct ofpbuf *reply;
1140
1141 reply = ofperr_encode_reply(error, request);
1142 if (!VLOG_DROP_INFO(&err_rl)) {
1143 const char *type_name;
1144 size_t request_len;
1145 enum ofpraw raw;
1146
1147 request_len = ntohs(request->length);
1148 type_name = (!ofpraw_decode_partial(&raw, request,
1149 MIN(64, request_len))
1150 ? ofpraw_get_name(raw)
1151 : "invalid");
1152
1153 VLOG_INFO("%s: sending %s error reply to %s message",
1154 rconn_get_name(ofconn->rconn), ofperr_to_string(error),
1155 type_name);
1156 }
1157 ofconn_send_reply(ofconn, reply);
1158 }
1159
1160 /* Reports that a flow_mod operation of the type specified by 'command' was
1161 * successfully executed by 'ofconn', so that the connmgr can log it. */
1162 void
1163 ofconn_report_flow_mod(struct ofconn *ofconn,
1164 enum ofp_flow_mod_command command)
1165 {
1166 long long int now;
1167
1168 switch (command) {
1169 case OFPFC_ADD:
1170 ofconn->n_add++;
1171 break;
1172
1173 case OFPFC_MODIFY:
1174 case OFPFC_MODIFY_STRICT:
1175 ofconn->n_modify++;
1176 break;
1177
1178 case OFPFC_DELETE:
1179 case OFPFC_DELETE_STRICT:
1180 ofconn->n_delete++;
1181 break;
1182 }
1183
1184 now = time_msec();
1185 if (ofconn->next_op_report == LLONG_MAX) {
1186 ofconn->first_op = now;
1187 ofconn->next_op_report = MAX(now + 10 * 1000, ofconn->op_backoff);
1188 ofconn->op_backoff = ofconn->next_op_report + 60 * 1000;
1189 }
1190 ofconn->last_op = now;
1191 }
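
/* Worked example of the report timing above: the first flow_mod after an
 * idle period arrives at time t, scheduling a log message for t + 10 s and
 * setting the backoff to t + 70 s.  Once that report is emitted, a flow_mod
 * arriving at, say, t + 15 s is not reported before t + 70 s, so flow_mod
 * activity is summarized at most about once a minute after the initial
 * 10-second window. */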
1192 \f
1193 /* OpenFlow 1.4 bundles. */
1194
1195 static inline uint32_t
1196 bundle_hash(uint32_t id)
1197 {
1198 return hash_int(id, 0);
1199 }
1200
1201 struct ofp_bundle *
1202 ofconn_get_bundle(struct ofconn *ofconn, uint32_t id)
1203 {
1204 struct ofp_bundle *bundle;
1205
1206 HMAP_FOR_EACH_IN_BUCKET(bundle, node, bundle_hash(id), &ofconn->bundles) {
1207 if (bundle->id == id) {
1208 return bundle;
1209 }
1210 }
1211
1212 return NULL;
1213 }
1214
1215 enum ofperr
1216 ofconn_insert_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
1217 {
1218 hmap_insert(&ofconn->bundles, &bundle->node, bundle_hash(bundle->id));
1219
1220 return 0;
1221 }
1222
1223 enum ofperr
1224 ofconn_remove_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
1225 {
1226 hmap_remove(&ofconn->bundles, &bundle->node);
1227
1228 return 0;
1229 }
1230
1231 static void
1232 bundle_remove_all(struct ofconn *ofconn)
1233 {
1234 struct ofp_bundle *b, *next;
1235
1236 HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
1237 ofp_bundle_remove__(ofconn, b, false);
1238 }
1239 }
1240
1241 static void
1242 bundle_remove_expired(struct ofconn *ofconn, long long int now)
1243 {
1244 struct ofp_bundle *b, *next;
1245 long long int limit = now - BUNDLE_IDLE_TIMEOUT;
1246
1247 HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
1248 if (b->used <= limit) {
1249 ofconn_send_error(ofconn, &b->ofp_msg, OFPERR_OFPBFC_TIMEOUT);
1250 ofp_bundle_remove__(ofconn, b, false);
1251 }
1252 }
1253 }
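
/* Worked example of the expiry above: with BUNDLE_IDLE_TIMEOUT at 10000 ms,
 * a bundle last used at time t becomes eligible for expiry at t + 10 s.
 * Since ofconn_run() calls this function at most once per
 * BUNDLE_EXPIRY_INTERVAL (1 s), the bundle is actually removed, and
 * OFPERR_OFPBFC_TIMEOUT sent, at the first expiry check at or after that
 * point. */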
1254 \f
1255 /* Private ofconn functions. */
1256
1257 static const char *
1258 ofconn_get_target(const struct ofconn *ofconn)
1259 {
1260 return rconn_get_target(ofconn->rconn);
1261 }
1262
1263 static struct ofconn *
1264 ofconn_create(struct connmgr *mgr, struct rconn *rconn, enum ofconn_type type,
1265 bool enable_async_msgs)
1266 OVS_REQUIRES(ofproto_mutex)
1267 {
1268 struct ofconn *ofconn;
1269
1270 ofconn = xzalloc(sizeof *ofconn);
1271 ofconn->connmgr = mgr;
1272 ovs_list_push_back(&mgr->all_conns, &ofconn->node);
1273 ofconn->rconn = rconn;
1274 ofconn->type = type;
1275 ofconn->enable_async_msgs = enable_async_msgs;
1276
1277 hmap_init(&ofconn->monitors);
1278 ovs_list_init(&ofconn->updates);
1279
1280 hmap_init(&ofconn->bundles);
1281 ofconn->next_bundle_expiry_check = time_msec() + BUNDLE_EXPIRY_INTERVAL;
1282
1283 ofconn_flush(ofconn);
1284
1285 return ofconn;
1286 }
1287
1288 /* Clears all of the state in 'ofconn' that should not persist from one
1289 * connection to the next. */
1290 static void
1291 ofconn_flush(struct ofconn *ofconn)
1292 OVS_REQUIRES(ofproto_mutex)
1293 {
1294 struct ofmonitor *monitor, *next_monitor;
1295 int i;
1296
1297 ofconn_log_flow_mods(ofconn);
1298
1299 ofconn->role = OFPCR12_ROLE_EQUAL;
1300 ofconn_set_protocol(ofconn, OFPUTIL_P_NONE);
1301 ofconn->packet_in_format = NXPIF_STANDARD;
1302
1303 rconn_packet_counter_destroy(ofconn->packet_in_counter);
1304 ofconn->packet_in_counter = rconn_packet_counter_create();
1305 for (i = 0; i < N_SCHEDULERS; i++) {
1306 if (ofconn->schedulers[i]) {
1307 int rate, burst;
1308
1309 pinsched_get_limits(ofconn->schedulers[i], &rate, &burst);
1310 pinsched_destroy(ofconn->schedulers[i]);
1311 ofconn->schedulers[i] = pinsched_create(rate, burst);
1312 }
1313 }
1314 ofconn->miss_send_len = (ofconn->type == OFCONN_PRIMARY
1315 ? OFP_DEFAULT_MISS_SEND_LEN
1316 : 0);
1317 ofconn->controller_id = 0;
1318
1319 rconn_packet_counter_destroy(ofconn->reply_counter);
1320 ofconn->reply_counter = rconn_packet_counter_create();
1321
1322 free(ofconn->async_cfg);
1323 ofconn->async_cfg = NULL;
1324
1325 ofconn->n_add = ofconn->n_delete = ofconn->n_modify = 0;
1326 ofconn->first_op = ofconn->last_op = LLONG_MIN;
1327 ofconn->next_op_report = LLONG_MAX;
1328 ofconn->op_backoff = LLONG_MIN;
1329
1330 HMAP_FOR_EACH_SAFE (monitor, next_monitor, ofconn_node,
1331 &ofconn->monitors) {
1332 ofmonitor_destroy(monitor);
1333 }
1334 rconn_packet_counter_destroy(ofconn->monitor_counter);
1335 ofconn->monitor_counter = rconn_packet_counter_create();
1336 ofpbuf_list_delete(&ofconn->updates); /* ...but it should be empty. */
1337 }
1338
1339 static void
1340 ofconn_destroy(struct ofconn *ofconn)
1341 OVS_REQUIRES(ofproto_mutex)
1342 {
1343 ofconn_flush(ofconn);
1344
1345 /* Force clearing of want_packet_in_on_miss to keep the global count
1346 * accurate. */
1347 ofconn->controller_id = 1;
1348 update_want_packet_in_on_miss(ofconn);
1349
1350 if (ofconn->type == OFCONN_PRIMARY) {
1351 hmap_remove(&ofconn->connmgr->controllers, &ofconn->hmap_node);
1352 }
1353
1354 bundle_remove_all(ofconn);
1355 hmap_destroy(&ofconn->bundles);
1356
1357 hmap_destroy(&ofconn->monitors);
1358 ovs_list_remove(&ofconn->node);
1359 rconn_destroy(ofconn->rconn);
1360 rconn_packet_counter_destroy(ofconn->packet_in_counter);
1361 rconn_packet_counter_destroy(ofconn->reply_counter);
1362 rconn_packet_counter_destroy(ofconn->monitor_counter);
1363 free(ofconn);
1364 }
1365
1366 /* Reconfigures 'ofconn' to match 'c'. 'ofconn' and 'c' must have the same
1367 * target. */
1368 static void
1369 ofconn_reconfigure(struct ofconn *ofconn, const struct ofproto_controller *c)
1370 {
1371 int probe_interval;
1372
1373 ofconn->band = c->band;
1374 ofconn->enable_async_msgs = c->enable_async_msgs;
1375
1376 rconn_set_max_backoff(ofconn->rconn, c->max_backoff);
1377
1378 probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
1379 rconn_set_probe_interval(ofconn->rconn, probe_interval);
1380
1381 ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit);
1382
1383 /* If the DSCP value changed, reconnect. */
1384 if (c->dscp != rconn_get_dscp(ofconn->rconn)) {
1385 rconn_set_dscp(ofconn->rconn, c->dscp);
1386 rconn_reconnect(ofconn->rconn);
1387 }
1388 }
1389
1390 /* Returns true if it makes sense for 'ofconn' to receive and process OpenFlow
1391 * messages. */
1392 static bool
1393 ofconn_may_recv(const struct ofconn *ofconn)
1394 {
1395 int count = rconn_packet_counter_n_packets(ofconn->reply_counter);
1396 return count < OFCONN_REPLY_MAX;
1397 }
1398
1399 static void
1400 ofconn_run(struct ofconn *ofconn,
1401 void (*handle_openflow)(struct ofconn *,
1402 const struct ofpbuf *ofp_msg))
1403 {
1404 struct connmgr *mgr = ofconn->connmgr;
1405 size_t i;
1406
1407 for (i = 0; i < N_SCHEDULERS; i++) {
1408 struct ovs_list txq;
1409
1410 pinsched_run(ofconn->schedulers[i], &txq);
1411 do_send_packet_ins(ofconn, &txq);
1412 }
1413
1414 rconn_run(ofconn->rconn);
1415
1416 /* Limit the number of iterations to avoid starving other tasks. */
1417 for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
1418 struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
1419 if (!of_msg) {
1420 break;
1421 }
1422
1423 if (mgr->fail_open) {
1424 fail_open_maybe_recover(mgr->fail_open);
1425 }
1426
1427 handle_openflow(ofconn, of_msg);
1428 ofpbuf_delete(of_msg);
1429 }
1430
1431 long long int now = time_msec();
1432
1433 if (now >= ofconn->next_bundle_expiry_check) {
1434 ofconn->next_bundle_expiry_check = now + BUNDLE_EXPIRY_INTERVAL;
1435 bundle_remove_expired(ofconn, now);
1436 }
1437
1438 if (now >= ofconn->next_op_report) {
1439 ofconn_log_flow_mods(ofconn);
1440 }
1441
1442 ovs_mutex_lock(&ofproto_mutex);
1443 if (!rconn_is_alive(ofconn->rconn)) {
1444 ofconn_destroy(ofconn);
1445 } else if (!rconn_is_connected(ofconn->rconn)) {
1446 ofconn_flush(ofconn);
1447 }
1448 ovs_mutex_unlock(&ofproto_mutex);
1449 }
1450
1451 static void
1452 ofconn_wait(struct ofconn *ofconn)
1453 {
1454 int i;
1455
1456 for (i = 0; i < N_SCHEDULERS; i++) {
1457 pinsched_wait(ofconn->schedulers[i]);
1458 }
1459 rconn_run_wait(ofconn->rconn);
1460 if (ofconn_may_recv(ofconn)) {
1461 rconn_recv_wait(ofconn->rconn);
1462 }
1463 if (ofconn->next_op_report != LLONG_MAX) {
1464 poll_timer_wait_until(ofconn->next_op_report);
1465 }
1466 }
1467
1468 static void
1469 ofconn_log_flow_mods(struct ofconn *ofconn)
1470 {
1471 int n_flow_mods = ofconn->n_add + ofconn->n_delete + ofconn->n_modify;
1472 if (n_flow_mods) {
1473 long long int ago = (time_msec() - ofconn->first_op) / 1000;
1474 long long int interval = (ofconn->last_op - ofconn->first_op) / 1000;
1475 struct ds s;
1476
1477 ds_init(&s);
1478 ds_put_format(&s, "%d flow_mods ", n_flow_mods);
1479 if (interval == ago) {
1480 ds_put_format(&s, "in the last %lld s", ago);
1481 } else if (interval) {
1482 ds_put_format(&s, "in the %lld s starting %lld s ago",
1483 interval, ago);
1484 } else {
1485 ds_put_format(&s, "%lld s ago", ago);
1486 }
1487
1488 ds_put_cstr(&s, " (");
1489 if (ofconn->n_add) {
1490 ds_put_format(&s, "%d adds, ", ofconn->n_add);
1491 }
1492 if (ofconn->n_delete) {
1493 ds_put_format(&s, "%d deletes, ", ofconn->n_delete);
1494 }
1495 if (ofconn->n_modify) {
1496 ds_put_format(&s, "%d modifications, ", ofconn->n_modify);
1497 }
1498 s.length -= 2;
1499 ds_put_char(&s, ')');
1500
1501 VLOG_INFO("%s: %s", rconn_get_name(ofconn->rconn), ds_cstr(&s));
1502 ds_destroy(&s);
1503
1504 ofconn->n_add = ofconn->n_delete = ofconn->n_modify = 0;
1505 }
1506 ofconn->next_op_report = LLONG_MAX;
1507 }
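
/* Example of the resulting log message (connection name and numbers are
 * illustrative): "br0<->tcp:192.0.2.1:6653: 12 flow_mods in the last 5 s
 * (10 adds, 2 deletes)".  The "s.length -= 2" above trims the trailing ", "
 * before the closing parenthesis. */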
1508
1509 /* Returns true if 'ofconn' should receive asynchronous messages of the given
1510 * OAM_* 'type' and 'reason', which should be a OFPR_* value for OAM_PACKET_IN,
1511 * a OFPPR_* value for OAM_PORT_STATUS, or an OFPRR_* value for
1512 * OAM_FLOW_REMOVED. Returns false if the message should not be sent on
1513 * 'ofconn'. */
1514 static bool
1515 ofconn_receives_async_msg(const struct ofconn *ofconn,
1516 enum ofputil_async_msg_type type,
1517 unsigned int reason)
1518 {
1519 ovs_assert(reason < 32);
1520 ovs_assert((unsigned int) type < OAM_N_TYPES);
1521
1522 /* Keep the following code in sync with the documentation in the
1523 * "Asynchronous Messages" section in DESIGN. */
1524
1525 if (ofconn->type == OFCONN_SERVICE && !ofconn->miss_send_len) {
1526 /* Service connections don't get asynchronous messages unless they have
1527 * explicitly asked for them by setting a nonzero miss send length. */
1528 return false;
1529 }
1530
1531 struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
1532 uint32_t *masks = (ofconn->role == OFPCR12_ROLE_SLAVE
1533 ? ac.slave
1534 : ac.master);
1535 return (masks[type] & (1u << reason)) != 0;
1536 }
1537
1538 /* Returns true to indicate that a packet_in message for a "table-miss"
1539 * should be sent to at least one controller.
1540 *
1541 * Returns false otherwise. */
1542 bool
1543 connmgr_wants_packet_in_on_miss(struct connmgr *mgr)
1544 {
1545 int count;
1546
1547 atomic_read_relaxed(&mgr->want_packet_in_on_miss, &count);
1548 return count > 0;
1549 }
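
/* Illustrative sketch of a caller (outside this file) using the lock-free
 * check above to skip building a packet-in when no controller wants
 * table-miss notifications:
 *
 *     if (connmgr_wants_packet_in_on_miss(mgr)) {
 *         ...encode and send the OFPT_PACKET_IN...
 *     }
 */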
1550
1551 /* Returns a human-readable name for an OpenFlow connection between 'mgr' and
1552 * 'target', suitable for use in log messages for identifying the connection.
1553 *
1554 * The name is dynamically allocated. The caller should free it (with free())
1555 * when it is no longer needed. */
1556 static char *
1557 ofconn_make_name(const struct connmgr *mgr, const char *target)
1558 {
1559 return xasprintf("%s<->%s", mgr->name, target);
1560 }
1561
1562 static void
1563 ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst)
1564 {
1565 int i;
1566
1567 for (i = 0; i < N_SCHEDULERS; i++) {
1568 struct pinsched **s = &ofconn->schedulers[i];
1569
1570 if (rate > 0) {
1571 if (!*s) {
1572 *s = pinsched_create(rate, burst);
1573 } else {
1574 pinsched_set_limits(*s, rate, burst);
1575 }
1576 } else {
1577 pinsched_destroy(*s);
1578 *s = NULL;
1579 }
1580 }
1581 }
1582
1583 static void
1584 ofconn_send(const struct ofconn *ofconn, struct ofpbuf *msg,
1585 struct rconn_packet_counter *counter)
1586 {
1587 ofpmsg_update_length(msg);
1588 rconn_send(ofconn->rconn, msg, counter);
1589 }
1590 \f
1591 /* Sending asynchronous messages. */
1592
1593 /* Sends an OFPT_PORT_STATUS message with 'opp' and 'reason' to appropriate
1594 * controllers managed by 'mgr'. For messages caused by a controller
1595 * OFPT_PORT_MOD, specify 'source' as the controller connection that sent the
1596 * request; otherwise, specify 'source' as NULL. */
1597 void
1598 connmgr_send_port_status(struct connmgr *mgr, struct ofconn *source,
1599 const struct ofputil_phy_port *pp, uint8_t reason)
1600 {
1601 /* XXX Should limit the number of queued port status change messages. */
1602 struct ofputil_port_status ps;
1603 struct ofconn *ofconn;
1604
1605 ps.reason = reason;
1606 ps.desc = *pp;
1607 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1608 if (ofconn_receives_async_msg(ofconn, OAM_PORT_STATUS, reason)) {
1609 struct ofpbuf *msg;
1610
1611 /* Before 1.5, OpenFlow specified that OFPT_PORT_MOD should not
1612 * generate OFPT_PORT_STATUS messages. That requirement was a
1613 * relic of how OpenFlow originally supported a single controller,
1614 * so that one could expect the controller to already know the
1615 * changes it had made.
1616 *
1617 * EXT-338 changes OpenFlow 1.5 OFPT_PORT_MOD to send
1618 * OFPT_PORT_STATUS messages to every controller. This is
1619 * obviously more useful in the multi-controller case. We could
1620 * always implement it that way in OVS, but that would risk
1621 * confusing controllers that are intended for single-controller
1622 * use only. (Imagine a controller that generates an OFPT_PORT_MOD
1623 * in response to any OFPT_PORT_STATUS!)
1624 *
1625 * So this compromises: for OpenFlow 1.4 and earlier, it generates
1626 * OFPT_PORT_STATUS for OFPT_PORT_MOD, but not back to the
1627 * originating controller. In a single-controller environment, in
1628 * particular, this means that it will never generate
1629 * OFPT_PORT_STATUS for OFPT_PORT_MOD at all. */
1630 if (ofconn == source
1631 && rconn_get_version(ofconn->rconn) < OFP15_VERSION) {
1632 continue;
1633 }
1634
1635 msg = ofputil_encode_port_status(&ps, ofconn_get_protocol(ofconn));
1636 ofconn_send(ofconn, msg, NULL);
1637 }
1638 }
1639 }
1640
1641 /* Sends an OFPT_REQUESTFORWARD message with 'request' and 'reason' to
1642 * appropriate controllers managed by 'mgr'. For messages caused by a
1643 * controller OFPT_GROUP_MOD and OFPT_METER_MOD, specify 'source' as the
1644 * controller connection that sent the request; otherwise, specify 'source'
1645 * as NULL. */
1646 void
1647 connmgr_send_requestforward(struct connmgr *mgr, const struct ofconn *source,
1648 const struct ofputil_requestforward *rf)
1649 {
1650 struct ofconn *ofconn;
1651
1652 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1653 if (ofconn_receives_async_msg(ofconn, OAM_REQUESTFORWARD, rf->reason)
1654 && rconn_get_version(ofconn->rconn) >= OFP14_VERSION
1655 && ofconn != source) {
1656 enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
1657 ofconn_send(ofconn, ofputil_encode_requestforward(rf, protocol),
1658 NULL);
1659 }
1660 }
1661 }
1662
1663 /* Sends an OFPT_FLOW_REMOVED or NXT_FLOW_REMOVED message based on 'fr' to
1664 * appropriate controllers managed by 'mgr'.
1665 *
1666 * This may be called from the RCU thread. */
1667 void
1668 connmgr_send_flow_removed(struct connmgr *mgr,
1669 const struct ofputil_flow_removed *fr)
1670 OVS_REQUIRES(ofproto_mutex)
1671 {
1672 struct ofconn *ofconn;
1673
1674 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1675 if (ofconn_receives_async_msg(ofconn, OAM_FLOW_REMOVED, fr->reason)) {
1676 struct ofpbuf *msg;
1677
1678 /* Account flow expirations as replies to OpenFlow requests. That
1679 * works because preventing OpenFlow requests from being processed
1680 * also prevents new flows from being added (and expiring). (It
1681 * also prevents processing OpenFlow requests that would not add
1682 * new flows, so it is imperfect.) */
1683 msg = ofputil_encode_flow_removed(fr, ofconn_get_protocol(ofconn));
1684 ofconn_send_reply(ofconn, msg);
1685 }
1686 }
1687 }
1688
1689 /* Sends an OFPT_TABLE_STATUS message with 'reason' to appropriate controllers
1690 * managed by 'mgr'. When the table state changes, the controller needs to be
1691 * informed with the OFPT_TABLE_STATUS message. The reason values
1692 * OFPTR_VACANCY_DOWN and OFPTR_VACANCY_UP identify a vacancy message. The
1693 * vacancy events are generated when the remaining space in the flow table
1694 * changes and crosses one of the vacancy thresholds specified by
1695 * OFPT_TABLE_MOD. */
1696 void
1697 connmgr_send_table_status(struct connmgr *mgr,
1698 const struct ofputil_table_desc *td,
1699 uint8_t reason)
1700 {
1701 struct ofputil_table_status ts;
1702 struct ofconn *ofconn;
1703
1704 ts.reason = reason;
1705 ts.desc = *td;
1706
1707 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1708 if (ofconn_receives_async_msg(ofconn, OAM_TABLE_STATUS, reason)) {
1709 struct ofpbuf *msg;
1710
1711 msg = ofputil_encode_table_status(&ts,
1712 ofconn_get_protocol(ofconn));
1713 if (msg) {
1714 ofconn_send(ofconn, msg, NULL);
1715 }
1716 }
1717 }
1718 }
1719
1720 /* Given 'am', sends an OFPT_PACKET_IN message to each OpenFlow controller as
1721 * necessary according to their individual configurations. */
1722 void
1723 connmgr_send_async_msg(struct connmgr *mgr,
1724 const struct ofproto_async_msg *am)
1725 {
1726 struct ofconn *ofconn;
1727
1728 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1729 enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
1730 if (protocol == OFPUTIL_P_NONE || !rconn_is_connected(ofconn->rconn)
1731 || ofconn->controller_id != am->controller_id
1732 || !ofconn_receives_async_msg(ofconn, am->oam,
1733 am->pin.up.public.reason)) {
1734 continue;
1735 }
1736
1737 struct ofpbuf *msg = ofputil_encode_packet_in_private(
1738 &am->pin.up, protocol, ofconn->packet_in_format);
1739
1740 struct ovs_list txq;
1741 bool is_miss = (am->pin.up.public.reason == OFPR_NO_MATCH ||
1742 am->pin.up.public.reason == OFPR_EXPLICIT_MISS ||
1743 am->pin.up.public.reason == OFPR_IMPLICIT_MISS);
1744 pinsched_send(ofconn->schedulers[is_miss],
1745 am->pin.up.public.flow_metadata.flow.in_port.ofp_port,
1746 msg, &txq);
1747 do_send_packet_ins(ofconn, &txq);
1748 }
1749 }
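
/* Note on the scheduler indexing above: 'schedulers[is_miss]' means that
 * index 1 rate-limits "table-miss" packet-ins and index 0 rate-limits
 * packet-ins generated by actions, which matches the "miss"/"action" naming
 * used in connmgr_get_controller_info(). */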
1750
1751 static void
1752 do_send_packet_ins(struct ofconn *ofconn, struct ovs_list *txq)
1753 {
1754 struct ofpbuf *pin;
1755
1756 LIST_FOR_EACH_POP (pin, list_node, txq) {
1757 if (rconn_send_with_limit(ofconn->rconn, pin,
1758 ofconn->packet_in_counter, 100) == EAGAIN) {
1759 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
1760
1761 VLOG_INFO_RL(&rl, "%s: dropping packet-in due to queue overflow",
1762 rconn_get_name(ofconn->rconn));
1763 }
1764 }
1765 }
1766 \f
1767 /* Fail-open settings. */
1768
1769 /* Returns the failure handling mode (OFPROTO_FAIL_SECURE or
1770 * OFPROTO_FAIL_STANDALONE) for 'mgr'. */
1771 enum ofproto_fail_mode
1772 connmgr_get_fail_mode(const struct connmgr *mgr)
1773 {
1774 return mgr->fail_mode;
1775 }
1776
1777 /* Sets the failure handling mode for 'mgr' to 'fail_mode' (either
1778 * OFPROTO_FAIL_SECURE or OFPROTO_FAIL_STANDALONE). */
1779 void
1780 connmgr_set_fail_mode(struct connmgr *mgr, enum ofproto_fail_mode fail_mode)
1781 {
1782 if (mgr->fail_mode != fail_mode) {
1783 mgr->fail_mode = fail_mode;
1784 update_fail_open(mgr);
1785 if (!connmgr_has_controllers(mgr)) {
1786 ofproto_flush_flows(mgr->ofproto);
1787 }
1788 }
1789 }
1790 \f
1791 /* Fail-open implementation. */
1792
1793 /* Returns the longest probe interval among the primary controllers configured
1794 * on 'mgr'. Returns 0 if there are no primary controllers. */
1795 int
1796 connmgr_get_max_probe_interval(const struct connmgr *mgr)
1797 {
1798 const struct ofconn *ofconn;
1799 int max_probe_interval;
1800
1801 max_probe_interval = 0;
1802 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1803 int probe_interval = rconn_get_probe_interval(ofconn->rconn);
1804 max_probe_interval = MAX(max_probe_interval, probe_interval);
1805 }
1806 return max_probe_interval;
1807 }
1808
1809 /* Returns the number of seconds for which all of 'mgr's primary controllers
1810 * have been disconnected. Returns 0 if 'mgr' has no primary controllers. */
1811 int
1812 connmgr_failure_duration(const struct connmgr *mgr)
1813 {
1814 const struct ofconn *ofconn;
1815 int min_failure_duration;
1816
1817 if (!connmgr_has_controllers(mgr)) {
1818 return 0;
1819 }
1820
1821 min_failure_duration = INT_MAX;
1822 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1823 int failure_duration = rconn_failure_duration(ofconn->rconn);
1824 min_failure_duration = MIN(min_failure_duration, failure_duration);
1825 }
1826 return min_failure_duration;
1827 }
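/* Illustrative sketch (not taken from fail-open.c): the two helpers above are
 * intended to be combined by the fail-open code, roughly as below.  The policy
 * shown, "give up after three probe intervals plus a small grace period", is a
 * hypothetical example, not the exact rule OVS applies. */
static bool
example_should_enter_fail_open(const struct connmgr *mgr)
{
    int trigger = connmgr_get_max_probe_interval(mgr) * 3 + 5;

    return connmgr_has_controllers(mgr)
           && connmgr_failure_duration(mgr) >= trigger;
}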
1828
1829 /* Returns true if at least one primary controller is connected (regardless of
1830 * whether those controllers are believed to have authenticated and accepted
1831 * this switch), false if none of them are connected. */
1832 bool
1833 connmgr_is_any_controller_connected(const struct connmgr *mgr)
1834 {
1835 const struct ofconn *ofconn;
1836
1837 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1838 if (rconn_is_connected(ofconn->rconn)) {
1839 return true;
1840 }
1841 }
1842 return false;
1843 }
1844
1845 /* Returns true if at least one primary controller is believed to have
1846 * authenticated and accepted this switch, false otherwise. */
1847 bool
1848 connmgr_is_any_controller_admitted(const struct connmgr *mgr)
1849 {
1850 const struct ofconn *ofconn;
1851
1852 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1853 if (rconn_is_admitted(ofconn->rconn)) {
1854 return true;
1855 }
1856 }
1857 return false;
1858 }
1859 \f
1860 /* In-band configuration. */
1861
1862 static bool any_extras_changed(const struct connmgr *,
1863 const struct sockaddr_in *extras, size_t n);
1864
1865 /* Sets the 'n' TCP port addresses in 'extras' as ones to which 'mgr''s
1866 * in-band control should guarantee access, in the same way that in-band
1867 * control guarantees access to OpenFlow controllers. */
1868 void
1869 connmgr_set_extra_in_band_remotes(struct connmgr *mgr,
1870 const struct sockaddr_in *extras, size_t n)
1871 {
1872 if (!any_extras_changed(mgr, extras, n)) {
1873 return;
1874 }
1875
1876 free(mgr->extra_in_band_remotes);
1877 mgr->n_extra_remotes = n;
1878 mgr->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);
1879
1880 update_in_band_remotes(mgr);
1881 }
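/* Illustrative caller (hypothetical, not in the original file): grant in-band
 * access to a single extra TCP endpoint, here the documentation address
 * 192.0.2.10 on port 6640.  Only sin_addr and sin_port matter for the
 * comparison done by any_extras_changed() below. */
static void
example_set_one_extra_remote(struct connmgr *mgr)
{
    struct sockaddr_in extra;

    memset(&extra, 0, sizeof extra);
    extra.sin_family = AF_INET;
    extra.sin_addr.s_addr = htonl(0xc000020a);   /* 192.0.2.10 */
    extra.sin_port = htons(6640);

    connmgr_set_extra_in_band_remotes(mgr, &extra, 1);
}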
1882
1883 /* Sets the OpenFlow queue used by flows set up by in-band control on
1884 * 'mgr' to 'queue_id'. If 'queue_id' is negative, then in-band control
1885 * flows will use the default queue. */
1886 void
1887 connmgr_set_in_band_queue(struct connmgr *mgr, int queue_id)
1888 {
1889 if (queue_id != mgr->in_band_queue) {
1890 mgr->in_band_queue = queue_id;
1891 update_in_band_remotes(mgr);
1892 }
1893 }
1894
1895 static bool
1896 any_extras_changed(const struct connmgr *mgr,
1897 const struct sockaddr_in *extras, size_t n)
1898 {
1899 size_t i;
1900
1901 if (n != mgr->n_extra_remotes) {
1902 return true;
1903 }
1904
1905 for (i = 0; i < n; i++) {
1906 const struct sockaddr_in *old = &mgr->extra_in_band_remotes[i];
1907 const struct sockaddr_in *new = &extras[i];
1908
1909 if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
1910 old->sin_port != new->sin_port) {
1911 return true;
1912 }
1913 }
1914
1915 return false;
1916 }
1917 \f
1918 /* In-band implementation. */
1919
1920 bool
1921 connmgr_has_in_band(struct connmgr *mgr)
1922 {
1923 return mgr->in_band != NULL;
1924 }
1925 \f
1926 /* Fail-open and in-band implementation. */
1927
1928 /* Called by 'ofproto' after all flows have been flushed, to allow fail-open
1929 * and standalone mode to re-create their flows.
1930 *
1931 * In-band control has more sophisticated code that manages flows itself. */
1932 void
1933 connmgr_flushed(struct connmgr *mgr)
1934 OVS_EXCLUDED(ofproto_mutex)
1935 {
1936 if (mgr->fail_open) {
1937 fail_open_flushed(mgr->fail_open);
1938 }
1939
1940 /* If there are no controllers and we're in standalone mode, set up a flow
1941  * that matches every packet and directs it to OFPP_NORMAL (which goes to
1942 * us). Otherwise, the switch is in secure mode and we won't pass any
1943 * traffic until a controller has been defined and it tells us to do so. */
1944 if (!connmgr_has_controllers(mgr)
1945 && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
1946 struct ofpbuf ofpacts;
1947 struct match match;
1948
1949 ofpbuf_init(&ofpacts, OFPACT_OUTPUT_SIZE);
1950 ofpact_put_OUTPUT(&ofpacts)->port = OFPP_NORMAL;
1951
1952 match_init_catchall(&match);
1953 ofproto_add_flow(mgr->ofproto, &match, 0, ofpacts.data,
1954 ofpacts.size);
1955
1956 ofpbuf_uninit(&ofpacts);
1957 }
1958 }
1959
1960 /* Returns the number of hidden rules created by the in-band and fail-open
1961 * implementations in table 0. (Subtracting this count from the number of
1962 * rules in the table 0 classifier, as maintained in struct oftable, yields
1963 * the number of flows that OVS should report via OpenFlow for table 0.) */
1964 int
1965 connmgr_count_hidden_rules(const struct connmgr *mgr)
1966 {
1967 int n_hidden = 0;
1968 if (mgr->in_band) {
1969 n_hidden += in_band_count_rules(mgr->in_band);
1970 }
1971 if (mgr->fail_open) {
1972 n_hidden += fail_open_count_rules(mgr->fail_open);
1973 }
1974 return n_hidden;
1975 }
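/* Tiny illustration (hypothetical helper) of the subtraction described in the
 * comment above: given the raw number of rules in the table 0 classifier,
 * report only the flows that OpenFlow should see. */
static unsigned int
example_visible_table0_rules(const struct connmgr *mgr, unsigned int raw_rules)
{
    return raw_rules - connmgr_count_hidden_rules(mgr);
}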
1976 \f
1977 /* Creates a new ofservice for 'target' in 'mgr'. Returns 0 if successful,
1978 * otherwise a positive errno value.
1979 *
1980 * ofservice_reconfigure() must be called to fully configure the new
1981 * ofservice. */
1982 static int
1983 ofservice_create(struct connmgr *mgr, const char *target,
1984 uint32_t allowed_versions, uint8_t dscp)
1985 {
1986 struct ofservice *ofservice;
1987 struct pvconn *pvconn;
1988 int error;
1989
1990 error = pvconn_open(target, allowed_versions, dscp, &pvconn);
1991 if (error) {
1992 return error;
1993 }
1994
1995 ofservice = xzalloc(sizeof *ofservice);
1996 hmap_insert(&mgr->services, &ofservice->node, hash_string(target, 0));
1997 ofservice->pvconn = pvconn;
1998 ofservice->allowed_versions = allowed_versions;
1999
2000 return 0;
2001 }
2002
2003 static void
2004 ofservice_destroy(struct connmgr *mgr, struct ofservice *ofservice)
2005 {
2006 hmap_remove(&mgr->services, &ofservice->node);
2007 pvconn_close(ofservice->pvconn);
2008 free(ofservice);
2009 }
2010
2011 static void
2012 ofservice_reconfigure(struct ofservice *ofservice,
2013 const struct ofproto_controller *c)
2014 {
2015 ofservice->probe_interval = c->probe_interval;
2016 ofservice->rate_limit = c->rate_limit;
2017 ofservice->burst_limit = c->burst_limit;
2018 ofservice->enable_async_msgs = c->enable_async_msgs;
2019 ofservice->dscp = c->dscp;
2020 }
2021
2022 /* Finds and returns the ofservice within 'mgr' that has the given
2023 * 'target', or a null pointer if none exists. */
2024 static struct ofservice *
2025 ofservice_lookup(struct connmgr *mgr, const char *target)
2026 {
2027 struct ofservice *ofservice;
2028
2029 HMAP_FOR_EACH_WITH_HASH (ofservice, node, hash_string(target, 0),
2030 &mgr->services) {
2031 if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
2032 return ofservice;
2033 }
2034 }
2035 return NULL;
2036 }
2037 \f
2038 /* Flow monitors (NXST_FLOW_MONITOR). */
2039
2040 /* A counter incremented when something significant happens to an OpenFlow
2041 * rule.
2042 *
2043 * - When a rule is added, its 'add_seqno' and 'modify_seqno' are set to
2044 * the current value (which is then incremented).
2045 *
2046 * - When a rule is modified, its 'modify_seqno' is set to the current
2047 * value (which is then incremented).
2048 *
2049 * Thus, by comparing an old value of monitor_seqno against a rule's
2050 * 'add_seqno', one can tell whether the rule was added before or after the old
2051 * value was read, and similarly for 'modify_seqno'.
2052 *
2053 * 32 bits should normally be sufficient (and would be nice, to save space in
2054 * each rule) but then we'd have to have some special cases for wraparound.
2055 *
2056 * We initialize monitor_seqno to 1 to allow 0 to be used as an invalid
2057 * value. */
2058 static uint64_t monitor_seqno = 1;
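/* Hypothetical helper illustrating the comparison described above: when a
 * connection is paused, ofconn->monitor_paused is set to "monitor_seqno++",
 * so a rule was added after the pause iff its add_seqno is strictly greater
 * than that saved value (this mirrors the check in ofmonitor_report()). */
static bool
example_added_after_pause(const struct rule *rule, uint64_t pause_seqno)
{
    return rule->add_seqno > pause_seqno;
}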
2059
2060 COVERAGE_DEFINE(ofmonitor_pause);
2061 COVERAGE_DEFINE(ofmonitor_resume);
2062
2063 enum ofperr
2064 ofmonitor_create(const struct ofputil_flow_monitor_request *request,
2065 struct ofconn *ofconn, struct ofmonitor **monitorp)
2066 OVS_REQUIRES(ofproto_mutex)
2067 {
2068 struct ofmonitor *m;
2069
2070 *monitorp = NULL;
2071
2072 m = ofmonitor_lookup(ofconn, request->id);
2073 if (m) {
2074 return OFPERR_OFPMOFC_MONITOR_EXISTS;
2075 }
2076
2077 m = xmalloc(sizeof *m);
2078 m->ofconn = ofconn;
2079 hmap_insert(&ofconn->monitors, &m->ofconn_node, hash_int(request->id, 0));
2080 m->id = request->id;
2081 m->flags = request->flags;
2082 m->out_port = request->out_port;
2083 m->table_id = request->table_id;
2084 minimatch_init(&m->match, &request->match);
2085
2086 *monitorp = m;
2087 return 0;
2088 }
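/* Illustrative (hypothetical) request construction showing the fields that
 * ofmonitor_create() consumes: a monitor for additions and deletions in all
 * tables, matching every flow.  The ID and flag choices are arbitrary
 * examples; field names are assumed to follow struct
 * ofputil_flow_monitor_request as used above. */
static enum ofperr
example_create_catchall_monitor(struct ofconn *ofconn, struct ofmonitor **mp)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofputil_flow_monitor_request request;

    memset(&request, 0, sizeof request);
    request.id = 1;                      /* Arbitrary, controller-chosen ID. */
    request.flags = NXFMF_ADD | NXFMF_DELETE;
    request.out_port = OFPP_ANY;
    request.table_id = 0xff;             /* All tables (see ofmonitor_report()). */
    match_init_catchall(&request.match);

    return ofmonitor_create(&request, ofconn, mp);
}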
2089
2090 struct ofmonitor *
2091 ofmonitor_lookup(struct ofconn *ofconn, uint32_t id)
2092 OVS_REQUIRES(ofproto_mutex)
2093 {
2094 struct ofmonitor *m;
2095
2096 HMAP_FOR_EACH_IN_BUCKET (m, ofconn_node, hash_int(id, 0),
2097 &ofconn->monitors) {
2098 if (m->id == id) {
2099 return m;
2100 }
2101 }
2102 return NULL;
2103 }
2104
2105 void
2106 ofmonitor_destroy(struct ofmonitor *m)
2107 OVS_REQUIRES(ofproto_mutex)
2108 {
2109 if (m) {
2110 minimatch_destroy(&m->match);
2111 hmap_remove(&m->ofconn->monitors, &m->ofconn_node);
2112 free(m);
2113 }
2114 }
2115
2116 void
2117 ofmonitor_report(struct connmgr *mgr, struct rule *rule,
2118 enum nx_flow_update_event event,
2119 enum ofp_flow_removed_reason reason,
2120 const struct ofconn *abbrev_ofconn, ovs_be32 abbrev_xid,
2121 const struct rule_actions *old_actions)
2122 OVS_REQUIRES(ofproto_mutex)
2123 {
2124 enum nx_flow_monitor_flags update;
2125 struct ofconn *ofconn;
2126
2127 if (rule_is_hidden(rule)) {
2128 return;
2129 }
2130
2131 switch (event) {
2132 case NXFME_ADDED:
2133 update = NXFMF_ADD;
2134 rule->add_seqno = rule->modify_seqno = monitor_seqno++;
2135 break;
2136
2137 case NXFME_DELETED:
2138 update = NXFMF_DELETE;
2139 break;
2140
2141 case NXFME_MODIFIED:
2142 update = NXFMF_MODIFY;
2143 rule->modify_seqno = monitor_seqno++;
2144 break;
2145
2146 default:
2147 case NXFME_ABBREV:
2148 OVS_NOT_REACHED();
2149 }
2150
2151 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2152 enum nx_flow_monitor_flags flags = 0;
2153 struct ofmonitor *m;
2154
2155 if (ofconn->monitor_paused) {
2156 /* Only send NXFME_DELETED notifications for flows that were added
2157 * before we paused. */
2158 if (event != NXFME_DELETED
2159 || rule->add_seqno > ofconn->monitor_paused) {
2160 continue;
2161 }
2162 }
2163
2164 HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
2165 if (m->flags & update
2166 && (m->table_id == 0xff || m->table_id == rule->table_id)
2167 && (ofproto_rule_has_out_port(rule, m->out_port)
2168 || (old_actions
2169 && ofpacts_output_to_port(old_actions->ofpacts,
2170 old_actions->ofpacts_len,
2171 m->out_port)))
2172 && cls_rule_is_loose_match(&rule->cr, &m->match)) {
2173 flags |= m->flags;
2174 }
2175 }
2176
2177 if (flags) {
2178 if (ovs_list_is_empty(&ofconn->updates)) {
2179 ofputil_start_flow_update(&ofconn->updates);
2180 ofconn->sent_abbrev_update = false;
2181 }
2182
2183 if (flags & NXFMF_OWN || ofconn != abbrev_ofconn
2184 || ofconn->monitor_paused) {
2185 struct ofputil_flow_update fu;
2186 struct match match;
2187
2188 fu.event = event;
2189 fu.reason = event == NXFME_DELETED ? reason : 0;
2190 fu.table_id = rule->table_id;
2191 fu.cookie = rule->flow_cookie;
2192 minimatch_expand(&rule->cr.match, &match);
2193 fu.match = &match;
2194 fu.priority = rule->cr.priority;
2195
2196 ovs_mutex_lock(&rule->mutex);
2197 fu.idle_timeout = rule->idle_timeout;
2198 fu.hard_timeout = rule->hard_timeout;
2199 ovs_mutex_unlock(&rule->mutex);
2200
2201 if (flags & NXFMF_ACTIONS) {
2202 const struct rule_actions *actions = rule_get_actions(rule);
2203 fu.ofpacts = actions->ofpacts;
2204 fu.ofpacts_len = actions->ofpacts_len;
2205 } else {
2206 fu.ofpacts = NULL;
2207 fu.ofpacts_len = 0;
2208 }
2209 ofputil_append_flow_update(&fu, &ofconn->updates);
2210 } else if (!ofconn->sent_abbrev_update) {
2211 struct ofputil_flow_update fu;
2212
2213 fu.event = NXFME_ABBREV;
2214 fu.xid = abbrev_xid;
2215 ofputil_append_flow_update(&fu, &ofconn->updates);
2216
2217 ofconn->sent_abbrev_update = true;
2218 }
2219 }
2220 }
2221 }
2222
2223 void
2224 ofmonitor_flush(struct connmgr *mgr)
2225 OVS_REQUIRES(ofproto_mutex)
2226 {
2227 struct ofconn *ofconn;
2228
2229 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2230 struct ofpbuf *msg;
2231
2232 LIST_FOR_EACH_POP (msg, list_node, &ofconn->updates) {
2233 unsigned int n_bytes;
2234
2235 ofconn_send(ofconn, msg, ofconn->monitor_counter);
2236 n_bytes = rconn_packet_counter_n_bytes(ofconn->monitor_counter);
2237 if (!ofconn->monitor_paused && n_bytes > 128 * 1024) {
2238 struct ofpbuf *pause;
2239
2240 COVERAGE_INC(ofmonitor_pause);
2241 ofconn->monitor_paused = monitor_seqno++;
2242 pause = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_MONITOR_PAUSED,
2243 OFP10_VERSION, htonl(0), 0);
2244 ofconn_send(ofconn, pause, ofconn->monitor_counter);
2245 }
2246 }
2247 }
2248 }
2249
2250 static void
2251 ofmonitor_resume(struct ofconn *ofconn)
2252 OVS_REQUIRES(ofproto_mutex)
2253 {
2254 struct rule_collection rules;
2255 struct ofpbuf *resumed;
2256 struct ofmonitor *m;
2257 struct ovs_list msgs;
2258
2259 rule_collection_init(&rules);
2260 HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
2261 ofmonitor_collect_resume_rules(m, ofconn->monitor_paused, &rules);
2262 }
2263
2264 ovs_list_init(&msgs);
2265 ofmonitor_compose_refresh_updates(&rules, &msgs);
2266
2267 resumed = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_MONITOR_RESUMED, OFP10_VERSION,
2268 htonl(0), 0);
2269 ovs_list_push_back(&msgs, &resumed->list_node);
2270 ofconn_send_replies(ofconn, &msgs);
2271
2272 ofconn->monitor_paused = 0;
2273 }
2274
2275 static bool
2276 ofmonitor_may_resume(const struct ofconn *ofconn)
2277 OVS_REQUIRES(ofproto_mutex)
2278 {
2279 return (ofconn->monitor_paused != 0
2280 && !rconn_packet_counter_n_packets(ofconn->monitor_counter));
2281 }
2282
2283 static void
2284 ofmonitor_run(struct connmgr *mgr)
2285 {
2286 struct ofconn *ofconn;
2287
2288 ovs_mutex_lock(&ofproto_mutex);
2289 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2290 if (ofmonitor_may_resume(ofconn)) {
2291 COVERAGE_INC(ofmonitor_resume);
2292 ofmonitor_resume(ofconn);
2293 }
2294 }
2295 ovs_mutex_unlock(&ofproto_mutex);
2296 }
2297
2298 static void
2299 ofmonitor_wait(struct connmgr *mgr)
2300 {
2301 struct ofconn *ofconn;
2302
2303 ovs_mutex_lock(&ofproto_mutex);
2304 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2305 if (ofmonitor_may_resume(ofconn)) {
2306 poll_immediate_wake();
2307 }
2308 }
2309 ovs_mutex_unlock(&ofproto_mutex);
2310 }
2311
2312 void
2313 ofproto_async_msg_free(struct ofproto_async_msg *am)
2314 {
2315 free(am->pin.up.public.packet);
2316 free(am->pin.up.public.userdata);
2317 free(am->pin.up.stack);
2318 free(am->pin.up.actions);
2319 free(am->pin.up.action_set);
2320 free(am);
2321 }