ofproto/connmgr.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include <errno.h>
19 #include <stdlib.h>
20
21 #include "bundles.h"
22 #include "connmgr.h"
23 #include "coverage.h"
24 #include "fail-open.h"
25 #include "in-band.h"
26 #include "odp-util.h"
27 #include "ofproto-provider.h"
28 #include "openvswitch/dynamic-string.h"
29 #include "openvswitch/ofp-actions.h"
30 #include "openvswitch/ofp-msgs.h"
31 #include "openvswitch/ofp-util.h"
32 #include "openvswitch/ofpbuf.h"
33 #include "openvswitch/vconn.h"
34 #include "openvswitch/vlog.h"
35 #include "ovs-atomic.h"
36 #include "pinsched.h"
37 #include "poll-loop.h"
38 #include "rconn.h"
39 #include "openvswitch/shash.h"
40 #include "simap.h"
41 #include "stream.h"
42 #include "timeval.h"
43 #include "util.h"
44
45 VLOG_DEFINE_THIS_MODULE(connmgr);
46 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
47
48 /* An OpenFlow connection.
49 *
50 *
51 * Thread-safety
52 * =============
53 *
54 * 'ofproto_mutex' must be held whenever an ofconn is created or destroyed or,
55 * more or less equivalently, whenever an ofconn is added to or removed from a
56 * connmgr. 'ofproto_mutex' doesn't protect the data inside the ofconn, except
57 * as specifically noted below. */
58 struct ofconn {
59 /* Configuration that persists from one connection to the next. */
60
61 struct ovs_list node; /* In struct connmgr's "all_conns" list. */
62 struct hmap_node hmap_node; /* In struct connmgr's "controllers" map. */
63
64 struct connmgr *connmgr; /* Connection's manager. */
65 struct rconn *rconn; /* OpenFlow connection. */
66 enum ofconn_type type; /* Type. */
67 enum ofproto_band band; /* In-band or out-of-band? */
68 bool enable_async_msgs; /* Initially enable async messages? */
69 bool want_packet_in_on_miss;
70
71 /* State that should be cleared from one connection to the next. */
72
73 /* OpenFlow state. */
74 enum ofp12_controller_role role; /* Role. */
75 enum ofputil_protocol protocol; /* Current protocol variant. */
76 enum nx_packet_in_format packet_in_format; /* OFPT_PACKET_IN format. */
77
78 /* OFPT_PACKET_IN related data. */
79 struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
80 #define N_SCHEDULERS 2
81 struct pinsched *schedulers[N_SCHEDULERS];
82 int miss_send_len; /* Bytes to send of buffered packets. */
83 uint16_t controller_id; /* Connection controller ID. */
84
85 /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
86 * requests, and the maximum number before we stop reading OpenFlow
87 * requests. */
88 #define OFCONN_REPLY_MAX 100
89 struct rconn_packet_counter *reply_counter;
90
91 /* Asynchronous message configuration in each possible role.
92 *
93 * A 1-bit enables sending an asynchronous message for one possible reason
94  * that the message might be generated; a 0-bit disables it. */
95 struct ofputil_async_cfg *async_cfg;
96
97 /* Flow table operation logging. */
98 int n_add, n_delete, n_modify; /* Number of unreported ops of each kind. */
99 long long int first_op, last_op; /* Range of times for unreported ops. */
100 long long int next_op_report; /* Time to report ops, or LLONG_MAX. */
101 long long int op_backoff; /* Earliest time to report ops again. */
102
103 /* Flow monitors (e.g. NXST_FLOW_MONITOR). */
104
105 /* Configuration. Contains "struct ofmonitor"s. */
106 struct hmap monitors OVS_GUARDED_BY(ofproto_mutex);
107
108 /* Flow control.
109 *
110 * When too many flow monitor notifications back up in the transmit buffer,
111 * we pause the transmission of further notifications. These members track
112 * the flow control state.
113 *
114 * When notifications are flowing, 'monitor_paused' is 0. When
115 * notifications are paused, 'monitor_paused' is the value of
116 * 'monitor_seqno' at the point we paused.
117 *
118 * 'monitor_counter' counts the OpenFlow messages and bytes currently in
119 * flight. This value growing too large triggers pausing. */
120 uint64_t monitor_paused OVS_GUARDED_BY(ofproto_mutex);
121 struct rconn_packet_counter *monitor_counter OVS_GUARDED_BY(ofproto_mutex);
122
123 /* State of monitors for a single ongoing flow_mod.
124 *
125 * 'updates' is a list of "struct ofpbuf"s that contain
126 * NXST_FLOW_MONITOR_REPLY messages representing the changes made by the
127 * current flow_mod.
128 *
129 * When 'updates' is nonempty, 'sent_abbrev_update' is true if 'updates'
130  * contains an update event of type NXFME_ABBREV and false otherwise. */
131 struct ovs_list updates OVS_GUARDED_BY(ofproto_mutex);
132 bool sent_abbrev_update OVS_GUARDED_BY(ofproto_mutex);
133
134 /* Active bundles. Contains "struct ofp_bundle"s. */
135 struct hmap bundles;
136 long long int next_bundle_expiry_check;
137 };
138
139 /* vswitchd/ovs-vswitchd.8.in documents the value of BUNDLE_IDLE_TIMEOUT in
140  * seconds. That documentation must be kept in sync with the value below. */
141 enum {
142 BUNDLE_EXPIRY_INTERVAL = 1000, /* Check bundle expiry every 1 sec. */
143 BUNDLE_IDLE_TIMEOUT = 10000, /* Expire idle bundles after 10 seconds. */
144 };
145
146 static struct ofconn *ofconn_create(struct connmgr *, struct rconn *,
147 enum ofconn_type, bool enable_async_msgs)
148 OVS_REQUIRES(ofproto_mutex);
149 static void ofconn_destroy(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
150 static void ofconn_flush(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
151
152 static void ofconn_reconfigure(struct ofconn *,
153 const struct ofproto_controller *);
154
155 static void ofconn_run(struct ofconn *,
156 void (*handle_openflow)(struct ofconn *,
157 const struct ofpbuf *ofp_msg));
158 static void ofconn_wait(struct ofconn *);
159
160 static void ofconn_log_flow_mods(struct ofconn *);
161
162 static const char *ofconn_get_target(const struct ofconn *);
163 static char *ofconn_make_name(const struct connmgr *, const char *target);
164
165 static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);
166
167 static void ofconn_send(const struct ofconn *, struct ofpbuf *,
168 struct rconn_packet_counter *);
169
170 static void do_send_packet_ins(struct ofconn *, struct ovs_list *txq);
171
172 /* A listener for incoming OpenFlow "service" connections. */
173 struct ofservice {
174 struct hmap_node node; /* In struct connmgr's "services" hmap. */
175 struct pvconn *pvconn; /* OpenFlow connection listener. */
176
177 /* These are not used by ofservice directly. They are settings for
178 * accepted "struct ofconn"s from the pvconn. */
179 int probe_interval; /* Max idle time before probing, in seconds. */
180 int rate_limit; /* Max packet-in rate in packets per second. */
181 int burst_limit; /* Limit on accumulating packet credits. */
182 bool enable_async_msgs; /* Initially enable async messages? */
183     uint8_t dscp;               /* DSCP value for controller connection. */
184 uint32_t allowed_versions; /* OpenFlow protocol versions that may
185 * be negotiated for a session. */
186 };
187
188 static void ofservice_reconfigure(struct ofservice *,
189 const struct ofproto_controller *);
190 static int ofservice_create(struct connmgr *mgr, const char *target,
191 uint32_t allowed_versions, uint8_t dscp);
192 static void ofservice_destroy(struct connmgr *, struct ofservice *);
193 static struct ofservice *ofservice_lookup(struct connmgr *,
194 const char *target);
195
196 /* Connection manager for an OpenFlow switch. */
197 struct connmgr {
198 struct ofproto *ofproto;
199 char *name;
200 char *local_port_name;
201
202 /* OpenFlow connections. */
203 struct hmap controllers; /* All OFCONN_PRIMARY controllers. */
204 struct ovs_list all_conns; /* All controllers. All modifications are
205 protected by ofproto_mutex, so that any
206 traversals from other threads can be made
207 safe by holding the ofproto_mutex. */
208 uint64_t master_election_id; /* monotonically increasing sequence number
209 * for master election */
210 bool master_election_id_defined;
211
212 /* OpenFlow listeners. */
213 struct hmap services; /* Contains "struct ofservice"s. */
214 struct pvconn **snoops;
215 size_t n_snoops;
216
217 /* Fail open. */
218 struct fail_open *fail_open;
219 enum ofproto_fail_mode fail_mode;
220
221 /* In-band control. */
222 struct in_band *in_band;
223 struct sockaddr_in *extra_in_band_remotes;
224 size_t n_extra_remotes;
225 int in_band_queue;
226
227 ATOMIC(int) want_packet_in_on_miss; /* Sum of ofconns' values. */
228 };
229
230 static void update_in_band_remotes(struct connmgr *);
231 static void add_snooper(struct connmgr *, struct vconn *);
232 static void ofmonitor_run(struct connmgr *);
233 static void ofmonitor_wait(struct connmgr *);
234
235 /* Creates and returns a new connection manager owned by 'ofproto'. 'name' is
236  * a name for the ofproto suitable for use in log messages.
237 * 'local_port_name' is the name of the local port (OFPP_LOCAL) within
238 * 'ofproto'. */
239 struct connmgr *
240 connmgr_create(struct ofproto *ofproto,
241 const char *name, const char *local_port_name)
242 {
243 struct connmgr *mgr;
244
245 mgr = xmalloc(sizeof *mgr);
246 mgr->ofproto = ofproto;
247 mgr->name = xstrdup(name);
248 mgr->local_port_name = xstrdup(local_port_name);
249
250 hmap_init(&mgr->controllers);
251 ovs_list_init(&mgr->all_conns);
252 mgr->master_election_id = 0;
253 mgr->master_election_id_defined = false;
254
255 hmap_init(&mgr->services);
256 mgr->snoops = NULL;
257 mgr->n_snoops = 0;
258
259 mgr->fail_open = NULL;
260 mgr->fail_mode = OFPROTO_FAIL_SECURE;
261
262 mgr->in_band = NULL;
263 mgr->extra_in_band_remotes = NULL;
264 mgr->n_extra_remotes = 0;
265 mgr->in_band_queue = -1;
266
267 atomic_init(&mgr->want_packet_in_on_miss, 0);
268
269 return mgr;
270 }
271
272 /* The default "table-miss" behaviour for OpenFlow1.3+ is to drop the
273 * packet rather than to send the packet to the controller.
274 *
275 * This function maintains the count of pre-OpenFlow1.3 with controller_id 0,
276 * as we assume these are the controllers that should receive "table-miss"
277 * notifications. */
278 static void
279 update_want_packet_in_on_miss(struct ofconn *ofconn)
280 {
281 /* We want a packet-in on miss when controller_id is zero and OpenFlow is
282 * lower than version 1.3. */
283 enum ofputil_protocol p = ofconn->protocol;
284 int new_want = (ofconn->controller_id == 0 &&
285 (p == OFPUTIL_P_NONE ||
286 ofputil_protocol_to_ofp_version(p) < OFP13_VERSION));
287
288 /* Update the setting and the count if necessary. */
289 int old_want = ofconn->want_packet_in_on_miss;
290 if (old_want != new_want) {
291 atomic_int *dst = &ofconn->connmgr->want_packet_in_on_miss;
292 int count;
293 atomic_read_relaxed(dst, &count);
294 atomic_store_relaxed(dst, count - old_want + new_want);
295
296 ofconn->want_packet_in_on_miss = new_want;
297 }
298 }
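
/* Illustrative sketch (not part of the original code): suppose a single
 * ofconn with controller_id 0 has negotiated OpenFlow 1.0.  Then its
 * 'want_packet_in_on_miss' is 1 and the connmgr-wide atomic counter is 1.
 * If that controller later moves to a nonzero controller ID, the counter
 * drops back to 0:
 *
 *     ofconn_set_controller_id(ofconn, 1);
 *     // update_want_packet_in_on_miss() runs: new_want = 0, so the
 *     // counter becomes 1 - 1 + 0 = 0.
 */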
299
300 /* Frees 'mgr' and all of its resources. */
301 void
302 connmgr_destroy(struct connmgr *mgr)
303 OVS_REQUIRES(ofproto_mutex)
304 {
305 struct ofservice *ofservice, *next_ofservice;
306 struct ofconn *ofconn, *next_ofconn;
307 size_t i;
308
309 if (!mgr) {
310 return;
311 }
312
313 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
314 ofconn_destroy(ofconn);
315 }
316
317 hmap_destroy(&mgr->controllers);
318
319 HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &mgr->services) {
320 ofservice_destroy(mgr, ofservice);
321 }
322 hmap_destroy(&mgr->services);
323
324 for (i = 0; i < mgr->n_snoops; i++) {
325 pvconn_close(mgr->snoops[i]);
326 }
327 free(mgr->snoops);
328
329 fail_open_destroy(mgr->fail_open);
330 mgr->fail_open = NULL;
331
332 in_band_destroy(mgr->in_band);
333 mgr->in_band = NULL;
334 free(mgr->extra_in_band_remotes);
335 free(mgr->name);
336 free(mgr->local_port_name);
337
338 free(mgr);
339 }
340
341 /* Does all of the periodic maintenance required by 'mgr'. Calls
342 * 'handle_openflow' for each message received on an OpenFlow connection,
343  * passing along the OpenFlow connection itself and the message that was
344  * received.
344 * 'handle_openflow' must not modify or free the message. */
345 void
346 connmgr_run(struct connmgr *mgr,
347 void (*handle_openflow)(struct ofconn *,
348 const struct ofpbuf *ofp_msg))
349 OVS_EXCLUDED(ofproto_mutex)
350 {
351 struct ofconn *ofconn, *next_ofconn;
352 struct ofservice *ofservice;
353 size_t i;
354
355 if (mgr->in_band) {
356 if (!in_band_run(mgr->in_band)) {
357 in_band_destroy(mgr->in_band);
358 mgr->in_band = NULL;
359 }
360 }
361
362 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
363 ofconn_run(ofconn, handle_openflow);
364 }
365 ofmonitor_run(mgr);
366
367 /* Fail-open maintenance. Do this after processing the ofconns since
368 * fail-open checks the status of the controller rconn. */
369 if (mgr->fail_open) {
370 fail_open_run(mgr->fail_open);
371 }
372
373 HMAP_FOR_EACH (ofservice, node, &mgr->services) {
374 struct vconn *vconn;
375 int retval;
376
377 retval = pvconn_accept(ofservice->pvconn, &vconn);
378 if (!retval) {
379 struct rconn *rconn;
380 char *name;
381
382             /* Pass default values for creation of the rconn. */
383 rconn = rconn_create(ofservice->probe_interval, 0, ofservice->dscp,
384 vconn_get_allowed_versions(vconn));
385 name = ofconn_make_name(mgr, vconn_get_name(vconn));
386 rconn_connect_unreliably(rconn, vconn, name);
387 free(name);
388
389 ovs_mutex_lock(&ofproto_mutex);
390 ofconn = ofconn_create(mgr, rconn, OFCONN_SERVICE,
391 ofservice->enable_async_msgs);
392 ovs_mutex_unlock(&ofproto_mutex);
393
394 ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
395 ofservice->burst_limit);
396 } else if (retval != EAGAIN) {
397 VLOG_WARN_RL(&rl, "accept failed (%s)", ovs_strerror(retval));
398 }
399 }
400
401 for (i = 0; i < mgr->n_snoops; i++) {
402 struct vconn *vconn;
403 int retval;
404
405 retval = pvconn_accept(mgr->snoops[i], &vconn);
406 if (!retval) {
407 add_snooper(mgr, vconn);
408 } else if (retval != EAGAIN) {
409 VLOG_WARN_RL(&rl, "accept failed (%s)", ovs_strerror(retval));
410 }
411 }
412 }
413
414 /* Causes the poll loop to wake up when connmgr_run() needs to run. */
415 void
416 connmgr_wait(struct connmgr *mgr)
417 {
418 struct ofservice *ofservice;
419 struct ofconn *ofconn;
420 size_t i;
421
422 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
423 ofconn_wait(ofconn);
424 }
425 ofmonitor_wait(mgr);
426 if (mgr->in_band) {
427 in_band_wait(mgr->in_band);
428 }
429 if (mgr->fail_open) {
430 fail_open_wait(mgr->fail_open);
431 }
432 HMAP_FOR_EACH (ofservice, node, &mgr->services) {
433 pvconn_wait(ofservice->pvconn);
434 }
435 for (i = 0; i < mgr->n_snoops; i++) {
436 pvconn_wait(mgr->snoops[i]);
437 }
438 }
439
440 /* Adds some memory usage statistics for 'mgr' into 'usage', for use with
441 * memory_report(). */
442 void
443 connmgr_get_memory_usage(const struct connmgr *mgr, struct simap *usage)
444 {
445 const struct ofconn *ofconn;
446 unsigned int packets = 0;
447 unsigned int ofconns = 0;
448
449 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
450 int i;
451
452 ofconns++;
453
454 packets += rconn_count_txqlen(ofconn->rconn);
455 for (i = 0; i < N_SCHEDULERS; i++) {
456 struct pinsched_stats stats;
457
458 pinsched_get_stats(ofconn->schedulers[i], &stats);
459 packets += stats.n_queued;
460 }
461 }
462 simap_increase(usage, "ofconns", ofconns);
463 simap_increase(usage, "packets", packets);
464 }
465
466 /* Returns the ofproto that owns 'ofconn''s connmgr. */
467 struct ofproto *
468 ofconn_get_ofproto(const struct ofconn *ofconn)
469 {
470 return ofconn->connmgr->ofproto;
471 }
472 \f
473 /* OpenFlow configuration. */
474
475 static void add_controller(struct connmgr *, const char *target, uint8_t dscp,
476 uint32_t allowed_versions)
477 OVS_REQUIRES(ofproto_mutex);
478 static struct ofconn *find_controller_by_target(struct connmgr *,
479 const char *target);
480 static void update_fail_open(struct connmgr *) OVS_EXCLUDED(ofproto_mutex);
481 static int set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
482 const struct sset *);
483
484 /* Returns true if 'mgr' has any configured primary controllers.
485 *
486 * Service controllers do not count, but configured primary controllers do
487 * count whether or not they are currently connected. */
488 bool
489 connmgr_has_controllers(const struct connmgr *mgr)
490 {
491 return !hmap_is_empty(&mgr->controllers);
492 }
493
494 /* Initializes 'info' and populates it with information about each configured
495 * primary controller. The keys in 'info' are the controllers' targets; the
496 * data values are corresponding "struct ofproto_controller_info".
497 *
498 * The caller owns 'info' and everything in it and should free it when it is no
499 * longer needed. */
500 void
501 connmgr_get_controller_info(struct connmgr *mgr, struct shash *info)
502 {
503 const struct ofconn *ofconn;
504
505 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
506 const struct rconn *rconn = ofconn->rconn;
507 const char *target = rconn_get_target(rconn);
508
509 if (!shash_find(info, target)) {
510 struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo);
511 time_t now = time_now();
512 time_t last_connection = rconn_get_last_connection(rconn);
513 time_t last_disconnect = rconn_get_last_disconnect(rconn);
514 int last_error = rconn_get_last_error(rconn);
515 int i;
516
517 shash_add(info, target, cinfo);
518
519 cinfo->is_connected = rconn_is_connected(rconn);
520 cinfo->role = ofconn->role;
521
522 smap_init(&cinfo->pairs);
523 if (last_error) {
524 smap_add(&cinfo->pairs, "last_error",
525 ovs_retval_to_string(last_error));
526 }
527
528 smap_add(&cinfo->pairs, "state", rconn_get_state(rconn));
529
530 if (last_connection != TIME_MIN) {
531 smap_add_format(&cinfo->pairs, "sec_since_connect",
532 "%ld", (long int) (now - last_connection));
533 }
534
535 if (last_disconnect != TIME_MIN) {
536 smap_add_format(&cinfo->pairs, "sec_since_disconnect",
537 "%ld", (long int) (now - last_disconnect));
538 }
539
540 for (i = 0; i < N_SCHEDULERS; i++) {
541 if (ofconn->schedulers[i]) {
542 const char *name = i ? "miss" : "action";
543 struct pinsched_stats stats;
544
545 pinsched_get_stats(ofconn->schedulers[i], &stats);
546 smap_add_nocopy(&cinfo->pairs,
547 xasprintf("packet-in-%s-backlog", name),
548 xasprintf("%u", stats.n_queued));
549 smap_add_nocopy(&cinfo->pairs,
550 xasprintf("packet-in-%s-bypassed", name),
551 xasprintf("%llu", stats.n_normal));
552 smap_add_nocopy(&cinfo->pairs,
553 xasprintf("packet-in-%s-queued", name),
554 xasprintf("%llu", stats.n_limited));
555 smap_add_nocopy(&cinfo->pairs,
556 xasprintf("packet-in-%s-dropped", name),
557 xasprintf("%llu", stats.n_queue_dropped));
558 }
559 }
560 }
561 }
562 }
563
564 void
565 connmgr_free_controller_info(struct shash *info)
566 {
567 struct shash_node *node;
568
569 SHASH_FOR_EACH (node, info) {
570 struct ofproto_controller_info *cinfo = node->data;
571 smap_destroy(&cinfo->pairs);
572 free(cinfo);
573 }
574 shash_destroy(info);
575 }
576
577 /* Changes 'mgr''s set of controllers to the 'n_controllers' controllers in
578 * 'controllers'. */
579 void
580 connmgr_set_controllers(struct connmgr *mgr,
581 const struct ofproto_controller *controllers,
582 size_t n_controllers, uint32_t allowed_versions)
583 OVS_EXCLUDED(ofproto_mutex)
584 {
585 bool had_controllers = connmgr_has_controllers(mgr);
586 struct shash new_controllers;
587 struct ofconn *ofconn, *next_ofconn;
588 struct ofservice *ofservice, *next_ofservice;
589 size_t i;
590
591 /* Required to add and remove ofconns. This could probably be narrowed to
592 * cover a smaller amount of code, if that yielded some benefit. */
593 ovs_mutex_lock(&ofproto_mutex);
594
595 /* Create newly configured controllers and services.
596 * Create a name to ofproto_controller mapping in 'new_controllers'. */
597 shash_init(&new_controllers);
598 for (i = 0; i < n_controllers; i++) {
599 const struct ofproto_controller *c = &controllers[i];
600
601 if (!vconn_verify_name(c->target)) {
602 bool add = false;
603 ofconn = find_controller_by_target(mgr, c->target);
604 if (!ofconn) {
605 VLOG_INFO("%s: added primary controller \"%s\"",
606 mgr->name, c->target);
607 add = true;
608 } else if (rconn_get_allowed_versions(ofconn->rconn) !=
609 allowed_versions) {
610 VLOG_INFO("%s: re-added primary controller \"%s\"",
611 mgr->name, c->target);
612 add = true;
613 ofconn_destroy(ofconn);
614 }
615 if (add) {
616 add_controller(mgr, c->target, c->dscp, allowed_versions);
617 }
618 } else if (!pvconn_verify_name(c->target)) {
619 bool add = false;
620 ofservice = ofservice_lookup(mgr, c->target);
621 if (!ofservice) {
622 VLOG_INFO("%s: added service controller \"%s\"",
623 mgr->name, c->target);
624 add = true;
625 } else if (ofservice->allowed_versions != allowed_versions) {
626 VLOG_INFO("%s: re-added service controller \"%s\"",
627 mgr->name, c->target);
628 ofservice_destroy(mgr, ofservice);
629 add = true;
630 }
631 if (add) {
632 ofservice_create(mgr, c->target, allowed_versions, c->dscp);
633 }
634 } else {
635 VLOG_WARN_RL(&rl, "%s: unsupported controller \"%s\"",
636 mgr->name, c->target);
637 continue;
638 }
639
640 shash_add_once(&new_controllers, c->target, &controllers[i]);
641 }
642
643 /* Delete controllers that are no longer configured.
644 * Update configuration of all now-existing controllers. */
645 HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &mgr->controllers) {
646 const char *target = ofconn_get_target(ofconn);
647 struct ofproto_controller *c;
648
649 c = shash_find_data(&new_controllers, target);
650 if (!c) {
651 VLOG_INFO("%s: removed primary controller \"%s\"",
652 mgr->name, target);
653 ofconn_destroy(ofconn);
654 } else {
655 ofconn_reconfigure(ofconn, c);
656 }
657 }
658
659 /* Delete services that are no longer configured.
660 * Update configuration of all now-existing services. */
661 HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &mgr->services) {
662 const char *target = pvconn_get_name(ofservice->pvconn);
663 struct ofproto_controller *c;
664
665 c = shash_find_data(&new_controllers, target);
666 if (!c) {
667 VLOG_INFO("%s: removed service controller \"%s\"",
668 mgr->name, target);
669 ofservice_destroy(mgr, ofservice);
670 } else {
671 ofservice_reconfigure(ofservice, c);
672 }
673 }
674
675 shash_destroy(&new_controllers);
676
677 ovs_mutex_unlock(&ofproto_mutex);
678
679 update_in_band_remotes(mgr);
680 update_fail_open(mgr);
681 if (had_controllers != connmgr_has_controllers(mgr)) {
682 ofproto_flush_flows(mgr->ofproto);
683 }
684 }
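
/* Note on the target dispatch above (added for clarity): active connection
 * methods such as "tcp:HOST[:PORT]", "ssl:HOST[:PORT]", or "unix:FILE"
 * satisfy vconn_verify_name() and become primary controllers, while passive
 * listeners such as "ptcp:[PORT]", "pssl:[PORT]", or "punix:FILE" satisfy
 * pvconn_verify_name() and become service controllers (ofservices).  The
 * example targets are illustrative; the authoritative list lives in the
 * vconn and pvconn providers. */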
685
686 /* Drops the connections between 'mgr' and all of its primary and secondary
687 * controllers, forcing them to reconnect. */
688 void
689 connmgr_reconnect(const struct connmgr *mgr)
690 {
691 struct ofconn *ofconn;
692
693 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
694 rconn_reconnect(ofconn->rconn);
695 }
696 }
697
698 /* Sets the "snoops" for 'mgr' to the pvconn targets listed in 'snoops'.
699 *
700 * A "snoop" is a pvconn to which every OpenFlow message to or from the most
701 * important controller on 'mgr' is mirrored. */
702 int
703 connmgr_set_snoops(struct connmgr *mgr, const struct sset *snoops)
704 {
705 return set_pvconns(&mgr->snoops, &mgr->n_snoops, snoops);
706 }
707
708 /* Adds each of the snoops currently configured on 'mgr' to 'snoops'. */
709 void
710 connmgr_get_snoops(const struct connmgr *mgr, struct sset *snoops)
711 {
712 size_t i;
713
714 for (i = 0; i < mgr->n_snoops; i++) {
715 sset_add(snoops, pvconn_get_name(mgr->snoops[i]));
716 }
717 }
718
719 /* Returns true if 'mgr' has at least one snoop, false if it has none. */
720 bool
721 connmgr_has_snoops(const struct connmgr *mgr)
722 {
723 return mgr->n_snoops > 0;
724 }
725
726 /* Creates a new controller for 'target' in 'mgr'.  ofconn_reconfigure()
727  * needs to be called later to finish the new ofconn's configuration. */
728 static void
729 add_controller(struct connmgr *mgr, const char *target, uint8_t dscp,
730 uint32_t allowed_versions)
731 OVS_REQUIRES(ofproto_mutex)
732 {
733 char *name = ofconn_make_name(mgr, target);
734 struct ofconn *ofconn;
735
736 ofconn = ofconn_create(mgr, rconn_create(5, 8, dscp, allowed_versions),
737 OFCONN_PRIMARY, true);
738 rconn_connect(ofconn->rconn, target, name);
739 hmap_insert(&mgr->controllers, &ofconn->hmap_node, hash_string(target, 0));
740
741 free(name);
742 }
743
744 static struct ofconn *
745 find_controller_by_target(struct connmgr *mgr, const char *target)
746 {
747 struct ofconn *ofconn;
748
749 HMAP_FOR_EACH_WITH_HASH (ofconn, hmap_node,
750 hash_string(target, 0), &mgr->controllers) {
751 if (!strcmp(ofconn_get_target(ofconn), target)) {
752 return ofconn;
753 }
754 }
755 return NULL;
756 }
757
758 static void
759 update_in_band_remotes(struct connmgr *mgr)
760 {
761 struct sockaddr_in *addrs;
762 size_t max_addrs, n_addrs;
763 struct ofconn *ofconn;
764 size_t i;
765
766 /* Allocate enough memory for as many remotes as we could possibly have. */
767 max_addrs = mgr->n_extra_remotes + hmap_count(&mgr->controllers);
768 addrs = xmalloc(max_addrs * sizeof *addrs);
769 n_addrs = 0;
770
771 /* Add all the remotes. */
772 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
773 const char *target = rconn_get_target(ofconn->rconn);
774 union {
775 struct sockaddr_storage ss;
776 struct sockaddr_in in;
777 } sa;
778
779 if (ofconn->band == OFPROTO_IN_BAND
780 && stream_parse_target_with_default_port(target, OFP_PORT, &sa.ss)
781 && sa.ss.ss_family == AF_INET) {
782 addrs[n_addrs++] = sa.in;
783 }
784 }
785 for (i = 0; i < mgr->n_extra_remotes; i++) {
786 addrs[n_addrs++] = mgr->extra_in_band_remotes[i];
787 }
788
789 /* Create or update or destroy in-band. */
790 if (n_addrs) {
791 if (!mgr->in_band) {
792 in_band_create(mgr->ofproto, mgr->local_port_name, &mgr->in_band);
793 }
794 } else {
795 /* in_band_run() needs a chance to delete any existing in-band flows.
796 * We will destroy mgr->in_band after it's done with that. */
797 }
798 if (mgr->in_band) {
799 in_band_set_queue(mgr->in_band, mgr->in_band_queue);
800 in_band_set_remotes(mgr->in_band, addrs, n_addrs);
801 }
802
803 /* Clean up. */
804 free(addrs);
805 }
806
807 static void
808 update_fail_open(struct connmgr *mgr)
809 OVS_EXCLUDED(ofproto_mutex)
810 {
811 if (connmgr_has_controllers(mgr)
812 && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
813 if (!mgr->fail_open) {
814 mgr->fail_open = fail_open_create(mgr->ofproto, mgr);
815 }
816 } else {
817 ovs_mutex_lock(&ofproto_mutex);
818 fail_open_destroy(mgr->fail_open);
819 ovs_mutex_unlock(&ofproto_mutex);
820 mgr->fail_open = NULL;
821 }
822 }
823
824 static int
825 set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
826 const struct sset *sset)
827 {
828 struct pvconn **pvconns = *pvconnsp;
829 size_t n_pvconns = *n_pvconnsp;
830 const char *name;
831 int retval = 0;
832 size_t i;
833
834 for (i = 0; i < n_pvconns; i++) {
835 pvconn_close(pvconns[i]);
836 }
837 free(pvconns);
838
839 pvconns = xmalloc(sset_count(sset) * sizeof *pvconns);
840 n_pvconns = 0;
841 SSET_FOR_EACH (name, sset) {
842 struct pvconn *pvconn;
843 int error;
844 error = pvconn_open(name, 0, 0, &pvconn);
845 if (!error) {
846 pvconns[n_pvconns++] = pvconn;
847 } else {
848 VLOG_ERR("failed to listen on %s: %s", name, ovs_strerror(error));
849 if (!retval) {
850 retval = error;
851 }
852 }
853 }
854
855 *pvconnsp = pvconns;
856 *n_pvconnsp = n_pvconns;
857
858 return retval;
859 }
860
861 /* Returns a "preference level" for snooping 'ofconn'. A higher return value
862 * means that 'ofconn' is more interesting for monitoring than a lower return
863 * value. */
864 static int
865 snoop_preference(const struct ofconn *ofconn)
866 {
867 switch (ofconn->role) {
868 case OFPCR12_ROLE_MASTER:
869 return 3;
870 case OFPCR12_ROLE_EQUAL:
871 return 2;
872 case OFPCR12_ROLE_SLAVE:
873 return 1;
874 case OFPCR12_ROLE_NOCHANGE:
875 default:
876 /* Shouldn't happen. */
877 return 0;
878 }
879 }
880
881 /* One of 'mgr''s "snoop" pvconns has accepted a new connection on 'vconn'.
882 * Connects this vconn to a controller. */
883 static void
884 add_snooper(struct connmgr *mgr, struct vconn *vconn)
885 {
886 struct ofconn *ofconn, *best;
887
888 /* Pick a controller for monitoring. */
889 best = NULL;
890 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
891 if (ofconn->type == OFCONN_PRIMARY
892 && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
893 best = ofconn;
894 }
895 }
896
897 if (best) {
898 rconn_add_monitor(best->rconn, vconn);
899 } else {
900 VLOG_INFO_RL(&rl, "no controller connection to snoop");
901 vconn_close(vconn);
902 }
903 }
904 \f
905 /* Public ofconn functions. */
906
907 /* Returns the connection type, either OFCONN_PRIMARY or OFCONN_SERVICE. */
908 enum ofconn_type
909 ofconn_get_type(const struct ofconn *ofconn)
910 {
911 return ofconn->type;
912 }
913
914 /* If a master election id is defined, stores it into '*idp' and returns
915 * true. Otherwise, stores UINT64_MAX into '*idp' and returns false. */
916 bool
917 ofconn_get_master_election_id(const struct ofconn *ofconn, uint64_t *idp)
918 {
919 *idp = (ofconn->connmgr->master_election_id_defined
920 ? ofconn->connmgr->master_election_id
921 : UINT64_MAX);
922 return ofconn->connmgr->master_election_id_defined;
923 }
924
925 /* Sets the master election id.
926 *
927  * Returns true if successful, false if the id is stale.
928 */
929 bool
930 ofconn_set_master_election_id(struct ofconn *ofconn, uint64_t id)
931 {
932 if (ofconn->connmgr->master_election_id_defined
933 &&
934 /* Unsigned difference interpreted as a two's complement signed
935 * value */
936 (int64_t)(id - ofconn->connmgr->master_election_id) < 0) {
937 return false;
938 }
939 ofconn->connmgr->master_election_id = id;
940 ofconn->connmgr->master_election_id_defined = true;
941
942 return true;
943 }
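
/* Worked examples of the wraparound-safe comparison above (illustrative,
 * derived from the code rather than adding new behavior):
 *
 *     current = 10,         id = 11  =>  (int64_t)(11 - 10) = 1,  accepted
 *     current = 10,         id = 9   =>  (int64_t)(9 - 10)  = -1, rejected (stale)
 *     current = UINT64_MAX, id = 0   =>  0 - UINT64_MAX wraps to 1, accepted
 */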
944
945 /* Returns the role configured for 'ofconn'.
946 *
947 * The default role, if no other role has been set, is OFPCR12_ROLE_EQUAL. */
948 enum ofp12_controller_role
949 ofconn_get_role(const struct ofconn *ofconn)
950 {
951 return ofconn->role;
952 }
953
954 void
955 ofconn_send_role_status(struct ofconn *ofconn, uint32_t role, uint8_t reason)
956 {
957 struct ofputil_role_status status;
958 struct ofpbuf *buf;
959
960 status.reason = reason;
961 status.role = role;
962 ofconn_get_master_election_id(ofconn, &status.generation_id);
963
964 buf = ofputil_encode_role_status(&status, ofconn_get_protocol(ofconn));
965 if (buf) {
966 ofconn_send(ofconn, buf, NULL);
967 }
968 }
969
970 /* Changes 'ofconn''s role to 'role'. If 'role' is OFPCR12_ROLE_MASTER then
971 * any existing master is demoted to a slave. */
972 void
973 ofconn_set_role(struct ofconn *ofconn, enum ofp12_controller_role role)
974 {
975 if (role != ofconn->role && role == OFPCR12_ROLE_MASTER) {
976 struct ofconn *other;
977
978 LIST_FOR_EACH (other, node, &ofconn->connmgr->all_conns) {
979 if (other->role == OFPCR12_ROLE_MASTER) {
980 other->role = OFPCR12_ROLE_SLAVE;
981 ofconn_send_role_status(other, OFPCR12_ROLE_SLAVE, OFPCRR_MASTER_REQUEST);
982 }
983 }
984 }
985 ofconn->role = role;
986 }
987
988 void
989 ofconn_set_invalid_ttl_to_controller(struct ofconn *ofconn, bool enable)
990 {
991 struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
992 uint32_t bit = 1u << OFPR_INVALID_TTL;
993 if (enable) {
994 ac.master[OAM_PACKET_IN] |= bit;
995 } else {
996 ac.master[OAM_PACKET_IN] &= ~bit;
997 }
998 ofconn_set_async_config(ofconn, &ac);
999 }
1000
1001 bool
1002 ofconn_get_invalid_ttl_to_controller(struct ofconn *ofconn)
1003 {
1004 struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
1005 uint32_t bit = 1u << OFPR_INVALID_TTL;
1006 return (ac.master[OAM_PACKET_IN] & bit) != 0;
1007 }
1008
1009 /* Returns the currently configured protocol for 'ofconn', one of OFPUTIL_P_*.
1010 *
1011 * Returns OFPUTIL_P_NONE, which is not a valid protocol, if 'ofconn' hasn't
1012 * completed version negotiation. This can't happen if at least one OpenFlow
1013 * message, other than OFPT_HELLO, has been received on the connection (such as
1014 * in ofproto.c's message handling code), since version negotiation is a
1015 * prerequisite for starting to receive messages. This means that
1016 * OFPUTIL_P_NONE is a special case that most callers need not worry about. */
1017 enum ofputil_protocol
1018 ofconn_get_protocol(const struct ofconn *ofconn)
1019 {
1020 if (ofconn->protocol == OFPUTIL_P_NONE &&
1021 rconn_is_connected(ofconn->rconn)) {
1022 int version = rconn_get_version(ofconn->rconn);
1023 if (version > 0) {
1024 ofconn_set_protocol(CONST_CAST(struct ofconn *, ofconn),
1025 ofputil_protocol_from_ofp_version(version));
1026 }
1027 }
1028
1029 return ofconn->protocol;
1030 }
1031
1032 /* Sets the protocol for 'ofconn' to 'protocol' (one of OFPUTIL_P_*).
1033 *
1034 * (This doesn't actually send anything to accomplish this. Presumably the
1035 * caller already did that.) */
1036 void
1037 ofconn_set_protocol(struct ofconn *ofconn, enum ofputil_protocol protocol)
1038 {
1039 ofconn->protocol = protocol;
1040 update_want_packet_in_on_miss(ofconn);
1041 }
1042
1043 /* Returns the currently configured packet in format for 'ofconn', one of
1044 * NXPIF_*.
1045 *
1046 * The default, if no other format has been set, is NXPIF_STANDARD. */
1047 enum nx_packet_in_format
1048 ofconn_get_packet_in_format(struct ofconn *ofconn)
1049 {
1050 return ofconn->packet_in_format;
1051 }
1052
1053 /* Sets the packet in format for 'ofconn' to 'packet_in_format' (one of
1054 * NXPIF_*). */
1055 void
1056 ofconn_set_packet_in_format(struct ofconn *ofconn,
1057 enum nx_packet_in_format packet_in_format)
1058 {
1059 ofconn->packet_in_format = packet_in_format;
1060 }
1061
1062 /* Sets the controller connection ID for 'ofconn' to 'controller_id'.
1063 *
1064 * The connection controller ID is used for OFPP_CONTROLLER and
1065 * NXAST_CONTROLLER actions. See "struct nx_action_controller" for details. */
1066 void
1067 ofconn_set_controller_id(struct ofconn *ofconn, uint16_t controller_id)
1068 {
1069 ofconn->controller_id = controller_id;
1070 update_want_packet_in_on_miss(ofconn);
1071 }
1072
1073 /* Returns the default miss send length for 'ofconn'. */
1074 int
1075 ofconn_get_miss_send_len(const struct ofconn *ofconn)
1076 {
1077 return ofconn->miss_send_len;
1078 }
1079
1080 /* Sets the default miss send length for 'ofconn' to 'miss_send_len'. */
1081 void
1082 ofconn_set_miss_send_len(struct ofconn *ofconn, int miss_send_len)
1083 {
1084 ofconn->miss_send_len = miss_send_len;
1085 }
1086
1087 void
1088 ofconn_set_async_config(struct ofconn *ofconn,
1089 const struct ofputil_async_cfg *ac)
1090 {
1091 if (!ofconn->async_cfg) {
1092 ofconn->async_cfg = xmalloc(sizeof *ofconn->async_cfg);
1093 }
1094 *ofconn->async_cfg = *ac;
1095
1096 if (ofputil_protocol_to_ofp_version(ofconn_get_protocol(ofconn))
1097 < OFP14_VERSION) {
1098 if (ofconn->async_cfg->master[OAM_PACKET_IN] & (1u << OFPR_ACTION)) {
1099 ofconn->async_cfg->master[OAM_PACKET_IN] |= OFPR14_ACTION_BITS;
1100 }
1101 if (ofconn->async_cfg->slave[OAM_PACKET_IN] & (1u << OFPR_ACTION)) {
1102 ofconn->async_cfg->slave[OAM_PACKET_IN] |= OFPR14_ACTION_BITS;
1103 }
1104 }
1105 }
1106
1107 struct ofputil_async_cfg
1108 ofconn_get_async_config(const struct ofconn *ofconn)
1109 {
1110 if (ofconn->async_cfg) {
1111 return *ofconn->async_cfg;
1112 }
1113
1114 int version = rconn_get_version(ofconn->rconn);
1115 return (version < 0 || !ofconn->enable_async_msgs
1116 ? OFPUTIL_ASYNC_CFG_INIT
1117 : ofputil_async_cfg_default(version));
1118 }
1119
1120 /* Sends 'msg' on 'ofconn', accounting it as a reply. (If there is a
1121 * sufficient number of OpenFlow replies in-flight on a single ofconn, then the
1122 * connmgr will stop accepting new OpenFlow requests on that ofconn until the
1123 * controller has accepted some of the replies.) */
1124 void
1125 ofconn_send_reply(const struct ofconn *ofconn, struct ofpbuf *msg)
1126 {
1127 ofconn_send(ofconn, msg, ofconn->reply_counter);
1128 }
1129
1130 /* Sends each of the messages in list 'replies' on 'ofconn' in order,
1131 * accounting them as replies. */
1132 void
1133 ofconn_send_replies(const struct ofconn *ofconn, struct ovs_list *replies)
1134 {
1135 struct ofpbuf *reply;
1136
1137 LIST_FOR_EACH_POP (reply, list_node, replies) {
1138 ofconn_send_reply(ofconn, reply);
1139 }
1140 }
1141
1142 /* Sends 'error' on 'ofconn', as a reply to 'request'.  At most the
1143  * first 64 bytes of 'request' are used. */
1144 void
1145 ofconn_send_error(const struct ofconn *ofconn,
1146 const struct ofp_header *request, enum ofperr error)
1147 {
1148 static struct vlog_rate_limit err_rl = VLOG_RATE_LIMIT_INIT(10, 10);
1149 struct ofpbuf *reply;
1150
1151 reply = ofperr_encode_reply(error, request);
1152 if (!VLOG_DROP_INFO(&err_rl)) {
1153 const char *type_name;
1154 size_t request_len;
1155 enum ofpraw raw;
1156
1157 request_len = ntohs(request->length);
1158 type_name = (!ofpraw_decode_partial(&raw, request,
1159 MIN(64, request_len))
1160 ? ofpraw_get_name(raw)
1161 : "invalid");
1162
1163 VLOG_INFO("%s: sending %s error reply to %s message",
1164 rconn_get_name(ofconn->rconn), ofperr_to_string(error),
1165 type_name);
1166 }
1167 ofconn_send_reply(ofconn, reply);
1168 }
1169
1170 /* Reports that a flow_mod operation of the type specified by 'command' was
1171 * successfully executed by 'ofconn', so that the connmgr can log it. */
1172 void
1173 ofconn_report_flow_mod(struct ofconn *ofconn,
1174 enum ofp_flow_mod_command command)
1175 {
1176 long long int now;
1177
1178 switch (command) {
1179 case OFPFC_ADD:
1180 ofconn->n_add++;
1181 break;
1182
1183 case OFPFC_MODIFY:
1184 case OFPFC_MODIFY_STRICT:
1185 ofconn->n_modify++;
1186 break;
1187
1188 case OFPFC_DELETE:
1189 case OFPFC_DELETE_STRICT:
1190 ofconn->n_delete++;
1191 break;
1192 }
1193
1194 now = time_msec();
1195 if (ofconn->next_op_report == LLONG_MAX) {
1196 ofconn->first_op = now;
1197 ofconn->next_op_report = MAX(now + 10 * 1000, ofconn->op_backoff);
1198 ofconn->op_backoff = ofconn->next_op_report + 60 * 1000;
1199 }
1200 ofconn->last_op = now;
1201 }
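
/* Illustrative timeline (assuming 'op_backoff' is already in the past when
 * the first flow_mod arrives at time t, all values in msec):
 *
 *     first_op       = t
 *     next_op_report = t + 10,000     (aggregate ops for 10 seconds)
 *     op_backoff     = t + 70,000     (earliest start of the next window)
 *
 * ofconn_run() calls ofconn_log_flow_mods() once time reaches
 * 'next_op_report', which logs the accumulated counts and resets
 * 'next_op_report' to LLONG_MAX. */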
1202 \f
1203 /* OpenFlow 1.4 bundles. */
1204
1205 static inline uint32_t
1206 bundle_hash(uint32_t id)
1207 {
1208 return hash_int(id, 0);
1209 }
1210
1211 struct ofp_bundle *
1212 ofconn_get_bundle(struct ofconn *ofconn, uint32_t id)
1213 {
1214 struct ofp_bundle *bundle;
1215
1216 HMAP_FOR_EACH_IN_BUCKET(bundle, node, bundle_hash(id), &ofconn->bundles) {
1217 if (bundle->id == id) {
1218 return bundle;
1219 }
1220 }
1221
1222 return NULL;
1223 }
1224
1225 enum ofperr
1226 ofconn_insert_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
1227 {
1228 hmap_insert(&ofconn->bundles, &bundle->node, bundle_hash(bundle->id));
1229
1230 return 0;
1231 }
1232
1233 enum ofperr
1234 ofconn_remove_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
1235 {
1236 hmap_remove(&ofconn->bundles, &bundle->node);
1237
1238 return 0;
1239 }
1240
1241 static void
1242 bundle_remove_all(struct ofconn *ofconn)
1243 {
1244 struct ofp_bundle *b, *next;
1245
1246 HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
1247 ofp_bundle_remove__(ofconn, b);
1248 }
1249 }
1250
1251 static void
1252 bundle_remove_expired(struct ofconn *ofconn, long long int now)
1253 {
1254 struct ofp_bundle *b, *next;
1255 long long int limit = now - BUNDLE_IDLE_TIMEOUT;
1256
1257 HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
1258 if (b->used <= limit) {
1259 ofconn_send_error(ofconn, &b->ofp_msg, OFPERR_OFPBFC_TIMEOUT);
1260 ofp_bundle_remove__(ofconn, b);
1261 }
1262 }
1263 }
1264 \f
1265 /* Private ofconn functions. */
1266
1267 static const char *
1268 ofconn_get_target(const struct ofconn *ofconn)
1269 {
1270 return rconn_get_target(ofconn->rconn);
1271 }
1272
1273 static struct ofconn *
1274 ofconn_create(struct connmgr *mgr, struct rconn *rconn, enum ofconn_type type,
1275 bool enable_async_msgs)
1276 OVS_REQUIRES(ofproto_mutex)
1277 {
1278 struct ofconn *ofconn;
1279
1280 ofconn = xzalloc(sizeof *ofconn);
1281 ofconn->connmgr = mgr;
1282 ovs_list_push_back(&mgr->all_conns, &ofconn->node);
1283 ofconn->rconn = rconn;
1284 ofconn->type = type;
1285 ofconn->enable_async_msgs = enable_async_msgs;
1286
1287 hmap_init(&ofconn->monitors);
1288 ovs_list_init(&ofconn->updates);
1289
1290 hmap_init(&ofconn->bundles);
1291 ofconn->next_bundle_expiry_check = time_msec() + BUNDLE_EXPIRY_INTERVAL;
1292
1293 ofconn_flush(ofconn);
1294
1295 return ofconn;
1296 }
1297
1298 /* Clears all of the state in 'ofconn' that should not persist from one
1299 * connection to the next. */
1300 static void
1301 ofconn_flush(struct ofconn *ofconn)
1302 OVS_REQUIRES(ofproto_mutex)
1303 {
1304 struct ofmonitor *monitor, *next_monitor;
1305 int i;
1306
1307 ofconn_log_flow_mods(ofconn);
1308
1309 ofconn->role = OFPCR12_ROLE_EQUAL;
1310 ofconn_set_protocol(ofconn, OFPUTIL_P_NONE);
1311 ofconn->packet_in_format = NXPIF_STANDARD;
1312
1313 rconn_packet_counter_destroy(ofconn->packet_in_counter);
1314 ofconn->packet_in_counter = rconn_packet_counter_create();
1315 for (i = 0; i < N_SCHEDULERS; i++) {
1316 if (ofconn->schedulers[i]) {
1317 int rate, burst;
1318
1319 pinsched_get_limits(ofconn->schedulers[i], &rate, &burst);
1320 pinsched_destroy(ofconn->schedulers[i]);
1321 ofconn->schedulers[i] = pinsched_create(rate, burst);
1322 }
1323 }
1324 ofconn->miss_send_len = (ofconn->type == OFCONN_PRIMARY
1325 ? OFP_DEFAULT_MISS_SEND_LEN
1326 : 0);
1327 ofconn->controller_id = 0;
1328
1329 rconn_packet_counter_destroy(ofconn->reply_counter);
1330 ofconn->reply_counter = rconn_packet_counter_create();
1331
1332 free(ofconn->async_cfg);
1333 ofconn->async_cfg = NULL;
1334
1335 ofconn->n_add = ofconn->n_delete = ofconn->n_modify = 0;
1336 ofconn->first_op = ofconn->last_op = LLONG_MIN;
1337 ofconn->next_op_report = LLONG_MAX;
1338 ofconn->op_backoff = LLONG_MIN;
1339
1340 HMAP_FOR_EACH_SAFE (monitor, next_monitor, ofconn_node,
1341 &ofconn->monitors) {
1342 ofmonitor_destroy(monitor);
1343 }
1344 rconn_packet_counter_destroy(ofconn->monitor_counter);
1345 ofconn->monitor_counter = rconn_packet_counter_create();
1346 ofpbuf_list_delete(&ofconn->updates); /* ...but it should be empty. */
1347 }
1348
1349 static void
1350 ofconn_destroy(struct ofconn *ofconn)
1351 OVS_REQUIRES(ofproto_mutex)
1352 {
1353 ofconn_flush(ofconn);
1354
1355 /* Force clearing of want_packet_in_on_miss to keep the global count
1356 * accurate. */
1357 ofconn->controller_id = 1;
1358 update_want_packet_in_on_miss(ofconn);
1359
1360 if (ofconn->type == OFCONN_PRIMARY) {
1361 hmap_remove(&ofconn->connmgr->controllers, &ofconn->hmap_node);
1362 }
1363
1364 bundle_remove_all(ofconn);
1365 hmap_destroy(&ofconn->bundles);
1366
1367 hmap_destroy(&ofconn->monitors);
1368 ovs_list_remove(&ofconn->node);
1369 rconn_destroy(ofconn->rconn);
1370 rconn_packet_counter_destroy(ofconn->packet_in_counter);
1371 rconn_packet_counter_destroy(ofconn->reply_counter);
1372 rconn_packet_counter_destroy(ofconn->monitor_counter);
1373 free(ofconn);
1374 }
1375
1376 /* Reconfigures 'ofconn' to match 'c'. 'ofconn' and 'c' must have the same
1377 * target. */
1378 static void
1379 ofconn_reconfigure(struct ofconn *ofconn, const struct ofproto_controller *c)
1380 {
1381 int probe_interval;
1382
1383 ofconn->band = c->band;
1384 ofconn->enable_async_msgs = c->enable_async_msgs;
1385
1386 rconn_set_max_backoff(ofconn->rconn, c->max_backoff);
1387
1388 probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
1389 rconn_set_probe_interval(ofconn->rconn, probe_interval);
1390
1391 ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit);
1392
1393     /* If the DSCP value changed, reconnect. */
1394 if (c->dscp != rconn_get_dscp(ofconn->rconn)) {
1395 rconn_set_dscp(ofconn->rconn, c->dscp);
1396 rconn_reconnect(ofconn->rconn);
1397 }
1398 }
1399
1400 /* Returns true if it makes sense for 'ofconn' to receive and process OpenFlow
1401 * messages. */
1402 static bool
1403 ofconn_may_recv(const struct ofconn *ofconn)
1404 {
1405 int count = rconn_packet_counter_n_packets(ofconn->reply_counter);
1406 return count < OFCONN_REPLY_MAX;
1407 }
1408
1409 static void
1410 ofconn_run(struct ofconn *ofconn,
1411 void (*handle_openflow)(struct ofconn *,
1412 const struct ofpbuf *ofp_msg))
1413 {
1414 struct connmgr *mgr = ofconn->connmgr;
1415 size_t i;
1416
1417 for (i = 0; i < N_SCHEDULERS; i++) {
1418 struct ovs_list txq;
1419
1420 pinsched_run(ofconn->schedulers[i], &txq);
1421 do_send_packet_ins(ofconn, &txq);
1422 }
1423
1424 rconn_run(ofconn->rconn);
1425
1426 /* Limit the number of iterations to avoid starving other tasks. */
1427 for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
1428 struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
1429 if (!of_msg) {
1430 break;
1431 }
1432
1433 if (mgr->fail_open) {
1434 fail_open_maybe_recover(mgr->fail_open);
1435 }
1436
1437 handle_openflow(ofconn, of_msg);
1438 ofpbuf_delete(of_msg);
1439 }
1440
1441 long long int now = time_msec();
1442
1443 if (now >= ofconn->next_bundle_expiry_check) {
1444 ofconn->next_bundle_expiry_check = now + BUNDLE_EXPIRY_INTERVAL;
1445 bundle_remove_expired(ofconn, now);
1446 }
1447
1448 if (now >= ofconn->next_op_report) {
1449 ofconn_log_flow_mods(ofconn);
1450 }
1451
1452 ovs_mutex_lock(&ofproto_mutex);
1453 if (!rconn_is_alive(ofconn->rconn)) {
1454 ofconn_destroy(ofconn);
1455 } else if (!rconn_is_connected(ofconn->rconn)) {
1456 ofconn_flush(ofconn);
1457 }
1458 ovs_mutex_unlock(&ofproto_mutex);
1459 }
1460
1461 static void
1462 ofconn_wait(struct ofconn *ofconn)
1463 {
1464 int i;
1465
1466 for (i = 0; i < N_SCHEDULERS; i++) {
1467 pinsched_wait(ofconn->schedulers[i]);
1468 }
1469 rconn_run_wait(ofconn->rconn);
1470 if (ofconn_may_recv(ofconn)) {
1471 rconn_recv_wait(ofconn->rconn);
1472 }
1473 if (ofconn->next_op_report != LLONG_MAX) {
1474 poll_timer_wait_until(ofconn->next_op_report);
1475 }
1476 }
1477
1478 static void
1479 ofconn_log_flow_mods(struct ofconn *ofconn)
1480 {
1481 int n_flow_mods = ofconn->n_add + ofconn->n_delete + ofconn->n_modify;
1482 if (n_flow_mods) {
1483 long long int ago = (time_msec() - ofconn->first_op) / 1000;
1484 long long int interval = (ofconn->last_op - ofconn->first_op) / 1000;
1485 struct ds s;
1486
1487 ds_init(&s);
1488 ds_put_format(&s, "%d flow_mods ", n_flow_mods);
1489 if (interval == ago) {
1490 ds_put_format(&s, "in the last %lld s", ago);
1491 } else if (interval) {
1492 ds_put_format(&s, "in the %lld s starting %lld s ago",
1493 interval, ago);
1494 } else {
1495 ds_put_format(&s, "%lld s ago", ago);
1496 }
1497
1498 ds_put_cstr(&s, " (");
1499 if (ofconn->n_add) {
1500 ds_put_format(&s, "%d adds, ", ofconn->n_add);
1501 }
1502 if (ofconn->n_delete) {
1503 ds_put_format(&s, "%d deletes, ", ofconn->n_delete);
1504 }
1505 if (ofconn->n_modify) {
1506 ds_put_format(&s, "%d modifications, ", ofconn->n_modify);
1507 }
1508 s.length -= 2;
1509 ds_put_char(&s, ')');
1510
1511 VLOG_INFO("%s: %s", rconn_get_name(ofconn->rconn), ds_cstr(&s));
1512 ds_destroy(&s);
1513
1514 ofconn->n_add = ofconn->n_delete = ofconn->n_modify = 0;
1515 }
1516 ofconn->next_op_report = LLONG_MAX;
1517 }
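
/* With the format above, a connection that performed 3 adds and 2 deletes
 * over the last 8 seconds would log something like (illustrative values):
 *
 *     br0<->tcp:127.0.0.1:6653: 5 flow_mods in the last 8 s (3 adds, 2 deletes)
 */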
1518
1519 /* Returns true if 'ofconn' should receive asynchronous messages of the given
1520  * OAM_* 'type' and 'reason', which should be an OFPR_* value for OAM_PACKET_IN,
1521  * an OFPPR_* value for OAM_PORT_STATUS, or an OFPRR_* value for
1522 * OAM_FLOW_REMOVED. Returns false if the message should not be sent on
1523 * 'ofconn'. */
1524 static bool
1525 ofconn_receives_async_msg(const struct ofconn *ofconn,
1526 enum ofputil_async_msg_type type,
1527 unsigned int reason)
1528 {
1529 ovs_assert(reason < 32);
1530 ovs_assert((unsigned int) type < OAM_N_TYPES);
1531
1532 /* Keep the following code in sync with the documentation in the
1533 * "Asynchronous Messages" section in 'topics/design' */
1534
1535 if (ofconn->type == OFCONN_SERVICE && !ofconn->miss_send_len) {
1536 /* Service connections don't get asynchronous messages unless they have
1537 * explicitly asked for them by setting a nonzero miss send length. */
1538 return false;
1539 }
1540
1541 struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
1542 uint32_t *masks = (ofconn->role == OFPCR12_ROLE_SLAVE
1543 ? ac.slave
1544 : ac.master);
1545 return (masks[type] & (1u << reason)) != 0;
1546 }
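
/* Minimal usage sketch (illustrative): deciding whether a slave-role
 * connection should receive a packet-in with reason OFPR_NO_MATCH boils
 * down to a single bit test against the slave mask:
 *
 *     struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
 *     bool deliver = (ac.slave[OAM_PACKET_IN] & (1u << OFPR_NO_MATCH)) != 0;
 */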
1547
1548 /* Returns true if a packet_in message for a "table-miss" should be sent
1549  * to at least one controller.
1550  *
1551  * Returns false otherwise. */
1552 bool
1553 connmgr_wants_packet_in_on_miss(struct connmgr *mgr)
1554 {
1555 int count;
1556
1557 atomic_read_relaxed(&mgr->want_packet_in_on_miss, &count);
1558 return count > 0;
1559 }
1560
1561 /* Returns a human-readable name for an OpenFlow connection between 'mgr' and
1562 * 'target', suitable for use in log messages for identifying the connection.
1563 *
1564 * The name is dynamically allocated. The caller should free it (with free())
1565 * when it is no longer needed. */
1566 static char *
1567 ofconn_make_name(const struct connmgr *mgr, const char *target)
1568 {
1569 return xasprintf("%s<->%s", mgr->name, target);
1570 }
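
/* For example, with mgr->name "br0" and target "tcp:127.0.0.1:6653", the
 * returned name is "br0<->tcp:127.0.0.1:6653" (values illustrative). */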
1571
1572 static void
1573 ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst)
1574 {
1575 int i;
1576
1577 for (i = 0; i < N_SCHEDULERS; i++) {
1578 struct pinsched **s = &ofconn->schedulers[i];
1579
1580 if (rate > 0) {
1581 if (!*s) {
1582 *s = pinsched_create(rate, burst);
1583 } else {
1584 pinsched_set_limits(*s, rate, burst);
1585 }
1586 } else {
1587 pinsched_destroy(*s);
1588 *s = NULL;
1589 }
1590 }
1591 }
1592
1593 static void
1594 ofconn_send(const struct ofconn *ofconn, struct ofpbuf *msg,
1595 struct rconn_packet_counter *counter)
1596 {
1597 ofpmsg_update_length(msg);
1598 rconn_send(ofconn->rconn, msg, counter);
1599 }
1600 \f
1601 /* Sending asynchronous messages. */
1602
1603 /* Sends an OFPT_PORT_STATUS message with 'opp' and 'reason' to appropriate
1604 * controllers managed by 'mgr'. For messages caused by a controller
1605 * OFPT_PORT_MOD, specify 'source' as the controller connection that sent the
1606 * request; otherwise, specify 'source' as NULL. */
1607 void
1608 connmgr_send_port_status(struct connmgr *mgr, struct ofconn *source,
1609 const struct ofputil_phy_port *pp, uint8_t reason)
1610 {
1611 /* XXX Should limit the number of queued port status change messages. */
1612 struct ofputil_port_status ps;
1613 struct ofconn *ofconn;
1614
1615 ps.reason = reason;
1616 ps.desc = *pp;
1617 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1618 if (ofconn_receives_async_msg(ofconn, OAM_PORT_STATUS, reason)) {
1619 struct ofpbuf *msg;
1620
1621 /* Before 1.5, OpenFlow specified that OFPT_PORT_MOD should not
1622 * generate OFPT_PORT_STATUS messages. That requirement was a
1623 * relic of how OpenFlow originally supported a single controller,
1624 * so that one could expect the controller to already know the
1625 * changes it had made.
1626 *
1627 * EXT-338 changes OpenFlow 1.5 OFPT_PORT_MOD to send
1628 * OFPT_PORT_STATUS messages to every controller. This is
1629 * obviously more useful in the multi-controller case. We could
1630 * always implement it that way in OVS, but that would risk
1631 * confusing controllers that are intended for single-controller
1632 * use only. (Imagine a controller that generates an OFPT_PORT_MOD
1633 * in response to any OFPT_PORT_STATUS!)
1634 *
1635 * So this compromises: for OpenFlow 1.4 and earlier, it generates
1636 * OFPT_PORT_STATUS for OFPT_PORT_MOD, but not back to the
1637 * originating controller. In a single-controller environment, in
1638 * particular, this means that it will never generate
1639 * OFPT_PORT_STATUS for OFPT_PORT_MOD at all. */
1640 if (ofconn == source
1641 && rconn_get_version(ofconn->rconn) < OFP15_VERSION) {
1642 continue;
1643 }
1644
1645 msg = ofputil_encode_port_status(&ps, ofconn_get_protocol(ofconn));
1646 ofconn_send(ofconn, msg, NULL);
1647 }
1648 }
1649 }
1650
1651 /* Sends an OFPT_REQUESTFORWARD message with 'request' and 'reason' to
1652 * appropriate controllers managed by 'mgr'. For messages caused by a
1653 * controller OFPT_GROUP_MOD and OFPT_METER_MOD, specify 'source' as the
1654 * controller connection that sent the request; otherwise, specify 'source'
1655 * as NULL. */
1656 void
1657 connmgr_send_requestforward(struct connmgr *mgr, const struct ofconn *source,
1658 const struct ofputil_requestforward *rf)
1659 {
1660 struct ofconn *ofconn;
1661
1662 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1663 if (ofconn_receives_async_msg(ofconn, OAM_REQUESTFORWARD, rf->reason)
1664 && rconn_get_version(ofconn->rconn) >= OFP14_VERSION
1665 && ofconn != source) {
1666 enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
1667 ofconn_send(ofconn, ofputil_encode_requestforward(rf, protocol),
1668 NULL);
1669 }
1670 }
1671 }
1672
1673 /* Sends an OFPT_FLOW_REMOVED or NXT_FLOW_REMOVED message based on 'fr' to
1674 * appropriate controllers managed by 'mgr'.
1675 *
1676 * This may be called from the RCU thread. */
1677 void
1678 connmgr_send_flow_removed(struct connmgr *mgr,
1679 const struct ofputil_flow_removed *fr)
1680 OVS_REQUIRES(ofproto_mutex)
1681 {
1682 struct ofconn *ofconn;
1683
1684 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1685 if (ofconn_receives_async_msg(ofconn, OAM_FLOW_REMOVED, fr->reason)) {
1686 struct ofpbuf *msg;
1687
1688 /* Account flow expirations as replies to OpenFlow requests. That
1689 * works because preventing OpenFlow requests from being processed
1690 * also prevents new flows from being added (and expiring). (It
1691 * also prevents processing OpenFlow requests that would not add
1692 * new flows, so it is imperfect.) */
1693 msg = ofputil_encode_flow_removed(fr, ofconn_get_protocol(ofconn));
1694 ofconn_send_reply(ofconn, msg);
1695 }
1696 }
1697 }
1698
1699 /* Sends an OFPT_TABLE_STATUS message with 'reason' to appropriate controllers
1700 * managed by 'mgr'. When the table state changes, the controller needs to be
1701 * informed with the OFPT_TABLE_STATUS message. The reason values
1702 * OFPTR_VACANCY_DOWN and OFPTR_VACANCY_UP identify a vacancy message. The
1703 * vacancy events are generated when the remaining space in the flow table
1704  * changes and crosses one of the vacancy thresholds specified by
1705 * OFPT_TABLE_MOD. */
1706 void
1707 connmgr_send_table_status(struct connmgr *mgr,
1708 const struct ofputil_table_desc *td,
1709 uint8_t reason)
1710 {
1711 struct ofputil_table_status ts;
1712 struct ofconn *ofconn;
1713
1714 ts.reason = reason;
1715 ts.desc = *td;
1716
1717 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1718 if (ofconn_receives_async_msg(ofconn, OAM_TABLE_STATUS, reason)) {
1719 struct ofpbuf *msg;
1720
1721 msg = ofputil_encode_table_status(&ts,
1722 ofconn_get_protocol(ofconn));
1723 if (msg) {
1724 ofconn_send(ofconn, msg, NULL);
1725 }
1726 }
1727 }
1728 }
1729
1730 /* Given 'am', sends an OFPT_PACKET_IN message to each OpenFlow controller as
1731 * necessary according to their individual configurations. */
1732 void
1733 connmgr_send_async_msg(struct connmgr *mgr,
1734 const struct ofproto_async_msg *am)
1735 {
1736 struct ofconn *ofconn;
1737
1738 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
1739 enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
1740 if (protocol == OFPUTIL_P_NONE || !rconn_is_connected(ofconn->rconn)
1741 || ofconn->controller_id != am->controller_id
1742 || !ofconn_receives_async_msg(ofconn, am->oam,
1743 am->pin.up.base.reason)) {
1744 continue;
1745 }
1746
1747 struct ofpbuf *msg = ofputil_encode_packet_in_private(
1748 &am->pin.up, protocol, ofconn->packet_in_format);
1749
1750 struct ovs_list txq;
1751 bool is_miss = (am->pin.up.base.reason == OFPR_NO_MATCH ||
1752 am->pin.up.base.reason == OFPR_EXPLICIT_MISS ||
1753 am->pin.up.base.reason == OFPR_IMPLICIT_MISS);
1754 pinsched_send(ofconn->schedulers[is_miss],
1755 am->pin.up.base.flow_metadata.flow.in_port.ofp_port,
1756 msg, &txq);
1757 do_send_packet_ins(ofconn, &txq);
1758 }
1759 }
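
/* Note (added for clarity): schedulers[0] rate-limits "action"-reason
 * packet-ins and schedulers[1] rate-limits table-miss packet-ins, which is
 * why the boolean 'is_miss' above can be used directly as the scheduler
 * index.  This matches the "miss"/"action" naming used in
 * connmgr_get_controller_info(). */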
1760
1761 static void
1762 do_send_packet_ins(struct ofconn *ofconn, struct ovs_list *txq)
1763 {
1764 struct ofpbuf *pin;
1765
1766 LIST_FOR_EACH_POP (pin, list_node, txq) {
1767 if (rconn_send_with_limit(ofconn->rconn, pin,
1768 ofconn->packet_in_counter, 100) == EAGAIN) {
1769 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
1770
1771 VLOG_INFO_RL(&rl, "%s: dropping packet-in due to queue overflow",
1772 rconn_get_name(ofconn->rconn));
1773 }
1774 }
1775 }
1776 \f
1777 /* Fail-open settings. */
1778
1779 /* Returns the failure handling mode (OFPROTO_FAIL_SECURE or
1780 * OFPROTO_FAIL_STANDALONE) for 'mgr'. */
1781 enum ofproto_fail_mode
1782 connmgr_get_fail_mode(const struct connmgr *mgr)
1783 {
1784 return mgr->fail_mode;
1785 }
1786
1787 /* Sets the failure handling mode for 'mgr' to 'fail_mode' (either
1788 * OFPROTO_FAIL_SECURE or OFPROTO_FAIL_STANDALONE). */
1789 void
1790 connmgr_set_fail_mode(struct connmgr *mgr, enum ofproto_fail_mode fail_mode)
1791 {
1792 if (mgr->fail_mode != fail_mode) {
1793 mgr->fail_mode = fail_mode;
1794 update_fail_open(mgr);
1795 if (!connmgr_has_controllers(mgr)) {
1796 ofproto_flush_flows(mgr->ofproto);
1797 }
1798 }
1799 }
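
/* Illustrative sketch (not part of the original source): forcing a bridge into
 * standalone forwarding might look like
 *
 *     connmgr_set_fail_mode(mgr, OFPROTO_FAIL_STANDALONE);
 *
 * In the OVS tree this is normally reached through ofproto_set_fail_mode(),
 * for example when "ovs-vsctl set-fail-mode <bridge> standalone" changes the
 * database and the bridge is reconfigured. */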
1800 \f
1801 /* Fail-open implementation. */
1802
1803 /* Returns the longest probe interval among the primary controllers configured
1804 * on 'mgr'. Returns 0 if there are no primary controllers. */
1805 int
1806 connmgr_get_max_probe_interval(const struct connmgr *mgr)
1807 {
1808 const struct ofconn *ofconn;
1809 int max_probe_interval;
1810
1811 max_probe_interval = 0;
1812 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1813 int probe_interval = rconn_get_probe_interval(ofconn->rconn);
1814 max_probe_interval = MAX(max_probe_interval, probe_interval);
1815 }
1816 return max_probe_interval;
1817 }
1818
1819 /* Returns the number of seconds for which all of 'mgr''s primary controllers
1820 * have been disconnected. Returns 0 if 'mgr' has no primary controllers. */
1821 int
1822 connmgr_failure_duration(const struct connmgr *mgr)
1823 {
1824 const struct ofconn *ofconn;
1825 int min_failure_duration;
1826
1827 if (!connmgr_has_controllers(mgr)) {
1828 return 0;
1829 }
1830
1831 min_failure_duration = INT_MAX;
1832 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1833 int failure_duration = rconn_failure_duration(ofconn->rconn);
1834 min_failure_duration = MIN(min_failure_duration, failure_duration);
1835 }
1836 return min_failure_duration;
1837 }
1838
1839 /* Returns true if at least one primary controller is connected (regardless of
1840 * whether those controllers are believed to have authenticated and accepted
1841 * this switch), false if none of them are connected. */
1842 bool
1843 connmgr_is_any_controller_connected(const struct connmgr *mgr)
1844 {
1845 const struct ofconn *ofconn;
1846
1847 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1848 if (rconn_is_connected(ofconn->rconn)) {
1849 return true;
1850 }
1851 }
1852 return false;
1853 }
1854
1855 /* Returns true if at least one primary controller is believed to have
1856 * authenticated and accepted this switch, false otherwise. */
1857 bool
1858 connmgr_is_any_controller_admitted(const struct connmgr *mgr)
1859 {
1860 const struct ofconn *ofconn;
1861
1862 HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
1863 if (rconn_is_admitted(ofconn->rconn)) {
1864 return true;
1865 }
1866 }
1867 return false;
1868 }
1869 \f
1870 /* In-band configuration. */
1871
1872 static bool any_extras_changed(const struct connmgr *,
1873 const struct sockaddr_in *extras, size_t n);
1874
1875 /* Sets the 'n' IP address/TCP port pairs in 'extras' as ones to which 'mgr''s
1876 * in-band control should guarantee access, in the same way that in-band
1877 * control guarantees access to OpenFlow controllers. */
1878 void
1879 connmgr_set_extra_in_band_remotes(struct connmgr *mgr,
1880 const struct sockaddr_in *extras, size_t n)
1881 {
1882 if (!any_extras_changed(mgr, extras, n)) {
1883 return;
1884 }
1885
1886 free(mgr->extra_in_band_remotes);
1887 mgr->n_extra_remotes = n;
1888 mgr->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);
1889
1890 update_in_band_remotes(mgr);
1891 }
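
/* Illustrative sketch (not part of the original source), assuming 'mgr' is the
 * bridge's connection manager: to guarantee in-band access to one extra
 * remote, say 10.0.0.5 port 6640, a caller might do
 *
 *     struct sockaddr_in extra;
 *     memset(&extra, 0, sizeof extra);
 *     extra.sin_family = AF_INET;
 *     extra.sin_addr.s_addr = inet_addr("10.0.0.5");
 *     extra.sin_port = htons(6640);
 *     connmgr_set_extra_in_band_remotes(mgr, &extra, 1);
 *
 * Passing n == 0 removes any previously configured extras. */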
1892
1893 /* Sets the OpenFlow queue used by flows set up by in-band control on
1894 * 'mgr' to 'queue_id'. If 'queue_id' is negative, then in-band control
1895 * flows will use the default queue. */
1896 void
1897 connmgr_set_in_band_queue(struct connmgr *mgr, int queue_id)
1898 {
1899 if (queue_id != mgr->in_band_queue) {
1900 mgr->in_band_queue = queue_id;
1901 update_in_band_remotes(mgr);
1902 }
1903 }
1904
1905 static bool
1906 any_extras_changed(const struct connmgr *mgr,
1907 const struct sockaddr_in *extras, size_t n)
1908 {
1909 size_t i;
1910
1911 if (n != mgr->n_extra_remotes) {
1912 return true;
1913 }
1914
1915 for (i = 0; i < n; i++) {
1916 const struct sockaddr_in *old = &mgr->extra_in_band_remotes[i];
1917 const struct sockaddr_in *new = &extras[i];
1918
1919 if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
1920 old->sin_port != new->sin_port) {
1921 return true;
1922 }
1923 }
1924
1925 return false;
1926 }
1927 \f
1928 /* In-band implementation. */
1929
1930 bool
1931 connmgr_has_in_band(struct connmgr *mgr)
1932 {
1933 return mgr->in_band != NULL;
1934 }
1935 \f
1936 /* Fail-open and in-band implementation. */
1937
1938 /* Called by 'ofproto' after all flows have been flushed, to allow fail-open
1939 * and standalone mode to re-create their flows.
1940 *
1941 * In-band control has more sophisticated code that manages flows itself. */
1942 void
1943 connmgr_flushed(struct connmgr *mgr)
1944 OVS_EXCLUDED(ofproto_mutex)
1945 {
1946 if (mgr->fail_open) {
1947 fail_open_flushed(mgr->fail_open);
1948 }
1949
1950 /* If there are no controllers and we're in standalone mode, set up a flow
1951 * that matches every packet and directs it to OFPP_NORMAL (which goes to
1952 * us). Otherwise, the switch is in secure mode and we won't pass any
1953 * traffic until a controller has been defined and it tells us to do so. */
1954 if (!connmgr_has_controllers(mgr)
1955 && mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
1956 struct ofpbuf ofpacts;
1957 struct match match;
1958
1959 ofpbuf_init(&ofpacts, OFPACT_OUTPUT_SIZE);
1960 ofpact_put_OUTPUT(&ofpacts)->port = OFPP_NORMAL;
1961
1962 match_init_catchall(&match);
1963 ofproto_add_flow(mgr->ofproto, &match, 0, ofpacts.data,
1964 ofpacts.size);
1965
1966 ofpbuf_uninit(&ofpacts);
1967 }
1968 }
1969
1970 /* Returns the number of hidden rules created by the in-band and fail-open
1971 * implementations in table 0. (Subtracting this count from the number of
1972 * rules in the table 0 classifier, as maintained in struct oftable, yields
1973 * the number of flows that OVS should report via OpenFlow for table 0.) */
1974 int
1975 connmgr_count_hidden_rules(const struct connmgr *mgr)
1976 {
1977 int n_hidden = 0;
1978 if (mgr->in_band) {
1979 n_hidden += in_band_count_rules(mgr->in_band);
1980 }
1981 if (mgr->fail_open) {
1982 n_hidden += fail_open_count_rules(mgr->fail_open);
1983 }
1984 return n_hidden;
1985 }
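
/* Illustrative example (not from the original source): if the table 0
 * classifier currently holds 12 rules and in-band control plus fail-open
 * account for 2 of them, this function returns 2 and table stats for table 0
 * should report 12 - 2 = 10 flows. */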
1986 \f
1987 /* Creates a new ofservice for 'target' in 'mgr'. Returns 0 if successful,
1988 * otherwise a positive errno value.
1989 *
1990 * ofservice_reconfigure() must be called to fully configure the new
1991 * ofservice. */
1992 static int
1993 ofservice_create(struct connmgr *mgr, const char *target,
1994 uint32_t allowed_versions, uint8_t dscp)
1995 {
1996 struct ofservice *ofservice;
1997 struct pvconn *pvconn;
1998 int error;
1999
2000 error = pvconn_open(target, allowed_versions, dscp, &pvconn);
2001 if (error) {
2002 return error;
2003 }
2004
2005 ofservice = xzalloc(sizeof *ofservice);
2006 hmap_insert(&mgr->services, &ofservice->node, hash_string(target, 0));
2007 ofservice->pvconn = pvconn;
2008 ofservice->allowed_versions = allowed_versions;
2009
2010 return 0;
2011 }
2012
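/* Removes 'ofservice' from 'mgr', closes its pvconn, and frees it. */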
2013 static void
2014 ofservice_destroy(struct connmgr *mgr, struct ofservice *ofservice)
2015 {
2016 hmap_remove(&mgr->services, &ofservice->node);
2017 pvconn_close(ofservice->pvconn);
2018 free(ofservice);
2019 }
2020
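/* Copies the configurable connection settings from 'c' into 'ofservice'. */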
2021 static void
2022 ofservice_reconfigure(struct ofservice *ofservice,
2023 const struct ofproto_controller *c)
2024 {
2025 ofservice->probe_interval = c->probe_interval;
2026 ofservice->rate_limit = c->rate_limit;
2027 ofservice->burst_limit = c->burst_limit;
2028 ofservice->enable_async_msgs = c->enable_async_msgs;
2029 ofservice->dscp = c->dscp;
2030 }
2031
2032 /* Finds and returns the ofservice within 'mgr' that has the given
2033 * 'target', or a null pointer if none exists. */
2034 static struct ofservice *
2035 ofservice_lookup(struct connmgr *mgr, const char *target)
2036 {
2037 struct ofservice *ofservice;
2038
2039 HMAP_FOR_EACH_WITH_HASH (ofservice, node, hash_string(target, 0),
2040 &mgr->services) {
2041 if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
2042 return ofservice;
2043 }
2044 }
2045 return NULL;
2046 }
2047 \f
2048 /* Flow monitors (NXST_FLOW_MONITOR). */
2049
2050 /* A counter incremented when something significant happens to an OpenFlow
2051 * rule.
2052 *
2053 * - When a rule is added, its 'add_seqno' and 'modify_seqno' are set to
2054 * the current value (which is then incremented).
2055 *
2056 * - When a rule is modified, its 'modify_seqno' is set to the current
2057 * value (which is then incremented).
2058 *
2059 * Thus, by comparing an old value of monitor_seqno against a rule's
2060 * 'add_seqno', one can tell whether the rule was added before or after the old
2061 * value was read, and similarly for 'modify_seqno'.
2062 *
2063 * 32 bits should normally be sufficient (and would be nice, to save space in
2064 * each rule) but then we'd have to have some special cases for wraparound.
2065 *
2066 * We initialize monitor_seqno to 1 to allow 0 to be used as an invalid
2067 * value. */
2068 static uint64_t monitor_seqno = 1;
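
/* Illustrative example (not from the original source): if a connection's
 * monitors pause when monitor_seqno is 100 (so its 'monitor_paused' becomes
 * 100), a rule with add_seqno 97 was added before the pause and its deletion
 * is still reported, while a rule with add_seqno 103 was added afterward and
 * is instead reported when the monitors resume. */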
2069
2070 COVERAGE_DEFINE(ofmonitor_pause);
2071 COVERAGE_DEFINE(ofmonitor_resume);
2072
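/* Creates a flow monitor on 'ofconn' according to 'request' and stores it in
 * '*monitorp'. Returns 0 if successful, otherwise an OpenFlow error code (in
 * particular OFPERR_OFPMOFC_MONITOR_EXISTS if 'ofconn' already has a monitor
 * with the requested id). */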
2073 enum ofperr
2074 ofmonitor_create(const struct ofputil_flow_monitor_request *request,
2075 struct ofconn *ofconn, struct ofmonitor **monitorp)
2076 OVS_REQUIRES(ofproto_mutex)
2077 {
2078 struct ofmonitor *m;
2079
2080 *monitorp = NULL;
2081
2082 m = ofmonitor_lookup(ofconn, request->id);
2083 if (m) {
2084 return OFPERR_OFPMOFC_MONITOR_EXISTS;
2085 }
2086
2087 m = xmalloc(sizeof *m);
2088 m->ofconn = ofconn;
2089 hmap_insert(&ofconn->monitors, &m->ofconn_node, hash_int(request->id, 0));
2090 m->id = request->id;
2091 m->flags = request->flags;
2092 m->out_port = request->out_port;
2093 m->table_id = request->table_id;
2094 minimatch_init(&m->match, &request->match);
2095
2096 *monitorp = m;
2097 return 0;
2098 }
2099
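/* Returns the flow monitor on 'ofconn' whose id is 'id', or NULL if 'ofconn'
 * has no such monitor. */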
2100 struct ofmonitor *
2101 ofmonitor_lookup(struct ofconn *ofconn, uint32_t id)
2102 OVS_REQUIRES(ofproto_mutex)
2103 {
2104 struct ofmonitor *m;
2105
2106 HMAP_FOR_EACH_IN_BUCKET (m, ofconn_node, hash_int(id, 0),
2107 &ofconn->monitors) {
2108 if (m->id == id) {
2109 return m;
2110 }
2111 }
2112 return NULL;
2113 }
2114
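/* Destroys flow monitor 'm' and removes it from its ofconn. Does nothing if
 * 'm' is NULL. */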
2115 void
2116 ofmonitor_destroy(struct ofmonitor *m)
2117 OVS_REQUIRES(ofproto_mutex)
2118 {
2119 if (m) {
2120 minimatch_destroy(&m->match);
2121 hmap_remove(&m->ofconn->monitors, &m->ofconn_node);
2122 free(m);
2123 }
2124 }
2125
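/* Queues a flow update for 'rule' on every connection in 'mgr' that has a
 * matching flow monitor. 'event' says whether the rule was added, deleted,
 * or modified; 'reason' gives the removal reason used for deletions; and
 * 'old_actions', if nonnull, holds the rule's previous actions so that
 * monitors watching a particular output port still see modifications that
 * remove that port. Updates for 'abbrev_ofconn', the connection whose own
 * request caused the change, are abbreviated to just 'abbrev_xid' unless that
 * connection asked for its own changes with NXFMF_OWN or its monitors are
 * paused. Queued updates are sent out by ofmonitor_flush(). */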
2126 void
2127 ofmonitor_report(struct connmgr *mgr, struct rule *rule,
2128 enum nx_flow_update_event event,
2129 enum ofp_flow_removed_reason reason,
2130 const struct ofconn *abbrev_ofconn, ovs_be32 abbrev_xid,
2131 const struct rule_actions *old_actions)
2132 OVS_REQUIRES(ofproto_mutex)
2133 {
2134 enum nx_flow_monitor_flags update;
2135 struct ofconn *ofconn;
2136
2137 if (rule_is_hidden(rule)) {
2138 return;
2139 }
2140
2141 switch (event) {
2142 case NXFME_ADDED:
2143 update = NXFMF_ADD;
2144 rule->add_seqno = rule->modify_seqno = monitor_seqno++;
2145 break;
2146
2147 case NXFME_DELETED:
2148 update = NXFMF_DELETE;
2149 break;
2150
2151 case NXFME_MODIFIED:
2152 update = NXFMF_MODIFY;
2153 rule->modify_seqno = monitor_seqno++;
2154 break;
2155
2156 default:
2157 case NXFME_ABBREV:
2158 OVS_NOT_REACHED();
2159 }
2160
2161 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2162 enum nx_flow_monitor_flags flags = 0;
2163 struct ofmonitor *m;
2164
2165 if (ofconn->monitor_paused) {
2166 /* Only send NXFME_DELETED notifications for flows that were added
2167 * before we paused. */
2168 if (event != NXFME_DELETED
2169 || rule->add_seqno > ofconn->monitor_paused) {
2170 continue;
2171 }
2172 }
2173
2174 HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
2175 if (m->flags & update
2176 && (m->table_id == 0xff || m->table_id == rule->table_id)
2177 && (ofproto_rule_has_out_port(rule, m->out_port)
2178 || (old_actions
2179 && ofpacts_output_to_port(old_actions->ofpacts,
2180 old_actions->ofpacts_len,
2181 m->out_port)))
2182 && cls_rule_is_loose_match(&rule->cr, &m->match)) {
2183 flags |= m->flags;
2184 }
2185 }
2186
2187 if (flags) {
2188 if (ovs_list_is_empty(&ofconn->updates)) {
2189 ofputil_start_flow_update(&ofconn->updates);
2190 ofconn->sent_abbrev_update = false;
2191 }
2192
2193 if (flags & NXFMF_OWN || ofconn != abbrev_ofconn
2194 || ofconn->monitor_paused) {
2195 struct ofputil_flow_update fu;
2196
2197 fu.event = event;
2198 fu.reason = event == NXFME_DELETED ? reason : 0;
2199 fu.table_id = rule->table_id;
2200 fu.cookie = rule->flow_cookie;
2201 minimatch_expand(&rule->cr.match, &fu.match);
2202 fu.priority = rule->cr.priority;
2203
2204 ovs_mutex_lock(&rule->mutex);
2205 fu.idle_timeout = rule->idle_timeout;
2206 fu.hard_timeout = rule->hard_timeout;
2207 ovs_mutex_unlock(&rule->mutex);
2208
2209 if (flags & NXFMF_ACTIONS) {
2210 const struct rule_actions *actions = rule_get_actions(rule);
2211 fu.ofpacts = actions->ofpacts;
2212 fu.ofpacts_len = actions->ofpacts_len;
2213 } else {
2214 fu.ofpacts = NULL;
2215 fu.ofpacts_len = 0;
2216 }
2217 ofputil_append_flow_update(&fu, &ofconn->updates,
2218 ofproto_get_tun_tab(rule->ofproto));
2219 } else if (!ofconn->sent_abbrev_update) {
2220 struct ofputil_flow_update fu;
2221
2222 fu.event = NXFME_ABBREV;
2223 fu.xid = abbrev_xid;
2224 ofputil_append_flow_update(&fu, &ofconn->updates,
2225 ofproto_get_tun_tab(rule->ofproto));
2226
2227 ofconn->sent_abbrev_update = true;
2228 }
2229 }
2230 }
2231 }
2232
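/* Sends the queued flow monitor updates on each of 'mgr's connections. If
 * more than 128 kB of updates end up queued on a connection, pauses that
 * connection's monitors and notifies the controller with an
 * NXT_FLOW_MONITOR_PAUSED message; ofmonitor_run() resumes monitoring once
 * the backlog drains. */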
2233 void
2234 ofmonitor_flush(struct connmgr *mgr)
2235 OVS_REQUIRES(ofproto_mutex)
2236 {
2237 struct ofconn *ofconn;
2238
2239 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2240 struct ofpbuf *msg;
2241
2242 LIST_FOR_EACH_POP (msg, list_node, &ofconn->updates) {
2243 unsigned int n_bytes;
2244
2245 ofconn_send(ofconn, msg, ofconn->monitor_counter);
2246 n_bytes = rconn_packet_counter_n_bytes(ofconn->monitor_counter);
2247 if (!ofconn->monitor_paused && n_bytes > 128 * 1024) {
2248 struct ofpbuf *pause;
2249
2250 COVERAGE_INC(ofmonitor_pause);
2251 ofconn->monitor_paused = monitor_seqno++;
2252 pause = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_MONITOR_PAUSED,
2253 OFP10_VERSION, htonl(0), 0);
2254 ofconn_send(ofconn, pause, ofconn->monitor_counter);
2255 }
2256 }
2257 }
2258 }
2259
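/* Resumes flow monitoring on 'ofconn': collects the rules added or modified
 * while monitoring was paused, sends refresh updates for them followed by an
 * NXT_FLOW_MONITOR_RESUMED message, and clears the paused state. */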
2260 static void
2261 ofmonitor_resume(struct ofconn *ofconn)
2262 OVS_REQUIRES(ofproto_mutex)
2263 {
2264 struct rule_collection rules;
2265 struct ofpbuf *resumed;
2266 struct ofmonitor *m;
2267 struct ovs_list msgs;
2268
2269 rule_collection_init(&rules);
2270 HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
2271 ofmonitor_collect_resume_rules(m, ofconn->monitor_paused, &rules);
2272 }
2273
2274 ovs_list_init(&msgs);
2275 ofmonitor_compose_refresh_updates(&rules, &msgs);
2276
2277 resumed = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_MONITOR_RESUMED, OFP10_VERSION,
2278 htonl(0), 0);
2279 ovs_list_push_back(&msgs, &resumed->list_node);
2280 ofconn_send_replies(ofconn, &msgs);
2281
2282 ofconn->monitor_paused = 0;
2283 }
2284
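/* Returns true if 'ofconn' has paused monitors and all of its queued monitor
 * updates have been sent, so that monitoring may resume. */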
2285 static bool
2286 ofmonitor_may_resume(const struct ofconn *ofconn)
2287 OVS_REQUIRES(ofproto_mutex)
2288 {
2289 return (ofconn->monitor_paused != 0
2290 && !rconn_packet_counter_n_packets(ofconn->monitor_counter));
2291 }
2292
2293 static void
2294 ofmonitor_run(struct connmgr *mgr)
2295 {
2296 struct ofconn *ofconn;
2297
2298 ovs_mutex_lock(&ofproto_mutex);
2299 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2300 if (ofmonitor_may_resume(ofconn)) {
2301 COVERAGE_INC(ofmonitor_resume);
2302 ofmonitor_resume(ofconn);
2303 }
2304 }
2305 ovs_mutex_unlock(&ofproto_mutex);
2306 }
2307
2308 static void
2309 ofmonitor_wait(struct connmgr *mgr)
2310 {
2311 struct ofconn *ofconn;
2312
2313 ovs_mutex_lock(&ofproto_mutex);
2314 LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
2315 if (ofmonitor_may_resume(ofconn)) {
2316 poll_immediate_wake();
2317 }
2318 }
2319 ovs_mutex_unlock(&ofproto_mutex);
2320 }
2321
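/* Frees 'am' along with the packet-in data that it owns. */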
2322 void
2323 ofproto_async_msg_free(struct ofproto_async_msg *am)
2324 {
2325 free(am->pin.up.base.packet);
2326 free(am->pin.up.base.userdata);
2327 free(am->pin.up.stack);
2328 free(am->pin.up.actions);
2329 free(am->pin.up.action_set);
2330 free(am);
2331 }