ofproto/fail-open.c
/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include <inttypes.h>
#include <stdlib.h>
#include "classifier.h"
#include "connmgr.h"
#include "dp-packet.h"
#include "fail-open.h"
#include "flow.h"
#include "mac-learning.h"
#include "odp-util.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vconn.h"
#include "openvswitch/vlog.h"
#include "ofproto.h"
#include "ofproto-provider.h"
#include "openvswitch/poll-loop.h"
#include "openvswitch/rconn.h"
#include "timeval.h"

VLOG_DEFINE_THIS_MODULE(fail_open);

/*
 * Fail-open mode.
 *
 * In fail-open mode, the switch detects when the controller cannot be
 * contacted or when the controller is dropping switch connections because the
 * switch does not pass its admission control policy.  In those situations the
 * switch sets up flows itself using the "normal" action.
 *
 * There is a little subtlety to the implementation, to properly handle the
 * case where the controller allows switch connections but drops them a few
 * seconds later for admission control reasons.  Because of this case, we
 * don't want to just stop setting up flows when we connect to the controller:
 * if we did, then new flow setup and existing flows would stop for the
 * duration of the connection to the controller, and thus the whole network
 * would go down for that period of time.
 *
 * So, instead, we add some special cases when we are connected to a
 * controller, but not yet sure that it has admitted us:
 *
 *     - We set up flows immediately ourselves, but simultaneously send out an
 *       OFPT_PACKET_IN to the controller.  We put a special bogus buffer-id
 *       in these OFPT_PACKET_IN messages so that duplicate packets don't get
 *       sent out to the network when the controller replies.
 *
 *     - We also send out OFPT_PACKET_IN messages for totally bogus packets
 *       every so often, in case no real new flows are arriving in the
 *       network.
 *
 *     - We don't flush the flow table at the time we connect, because this
 *       could cause network stuttering in a switch with lots of flows or very
 *       high-bandwidth flows by suddenly throwing lots of packets down to
 *       userspace.
 */

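/*
 * Rough sketch of how a caller might drive this module (illustrative only;
 * the real call sites are in ofproto.c and connmgr.c and differ in detail):
 *
 *     struct fail_open *fo = fail_open_create(ofproto, connmgr);
 *     for (;;) {
 *         fail_open_run(fo);            -- enter fail-open if disconnected
 *         fail_open_maybe_recover(fo);  -- leave it once a controller admits us
 *         fail_open_wait(fo);           -- wake up for the next bogus packet-in
 *         poll_block();
 *     }
 *     fail_open_destroy(fo);            -- requires ofproto_mutex
 */
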
struct fail_open {
    struct ofproto *ofproto;
    struct connmgr *connmgr;
    int last_disconn_secs;
    long long int next_bogus_packet_in;
    struct rconn_packet_counter *bogus_packet_counter;
    bool fail_open_active;
};

static void fail_open_recover(struct fail_open *) OVS_REQUIRES(ofproto_mutex);

/* Returns the number of seconds of disconnection after which fail-open mode
 * should activate. */
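/* (For example, with a hypothetical 5-second inactivity probe interval,
 * fail-open would activate after roughly 15 seconds of disconnection.) */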
static int
trigger_duration(const struct fail_open *fo)
{
    if (!connmgr_has_controllers(fo->connmgr)) {
        /* Shouldn't ever arrive here, but if we do, never fail open. */
        return INT_MAX;
    } else {
        /* Otherwise, every controller must have a chance to send an
         * inactivity probe and reconnect before we fail open, so take the
         * maximum probe interval and multiply by 3:
         *
         *     - The first interval is the idle time before sending an
         *       inactivity probe.
         *
         *     - The second interval is the time allowed for a response to the
         *       inactivity probe.
         *
         *     - The third interval is the time allowed to reconnect after no
         *       response is received.
         */
        return connmgr_get_max_probe_interval(fo->connmgr) * 3;
    }
}

/* Returns true if 'fo' is currently in fail-open mode, otherwise false. */
bool
fail_open_is_active(const struct fail_open *fo)
{
    return fo->last_disconn_secs != 0;
}

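/* Composes a RARP packet with a random Nicira-OUI source MAC and sends it to
 * the controllers as an OFPT_PACKET_IN from OFPP_LOCAL, so that a controller
 * that is up but has not yet admitted us has traffic to respond to even when
 * no real new flows are arriving. */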
static void
send_bogus_packet_ins(struct fail_open *fo)
{
    struct eth_addr mac;
    struct dp_packet b;

    dp_packet_init(&b, 128);
    eth_addr_nicira_random(&mac);
    compose_rarp(&b, mac);

    struct ofproto_async_msg am = {
        .oam = OAM_PACKET_IN,
        .pin = {
            .up = {
                .base = {
                    .packet = dp_packet_data(&b),
                    .packet_len = dp_packet_size(&b),
                    .flow_metadata.flow.in_port.ofp_port = OFPP_LOCAL,
                    .flow_metadata.wc.masks.in_port.ofp_port
                        = u16_to_ofp(UINT16_MAX),
                    .reason = OFPR_NO_MATCH,
                    .cookie = OVS_BE64_MAX,
                },
            },
            .max_len = UINT16_MAX,
        }
    };
    connmgr_send_async_msg(fo->connmgr, &am);

    dp_packet_uninit(&b);
}

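/* Removes the catch-all NORMAL flow at FAIL_OPEN_PRIORITY that
 * fail_open_add_normal_flow() installs. */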
static void
fail_open_del_normal_flow(struct fail_open *fo)
    OVS_REQUIRES(ofproto_mutex)
{
    struct match match;

    match_init_catchall(&match);
    ofproto_delete_flow(fo->ofproto, &match, FAIL_OPEN_PRIORITY);
}

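/* Installs the fail-open flow: a catch-all rule at FAIL_OPEN_PRIORITY whose
 * only action is output to OFPP_NORMAL. */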
static void
fail_open_add_normal_flow(struct fail_open *fo)
{
    struct ofpbuf ofpacts;
    struct match match;

    /* Set up a flow that matches every packet and directs them to
     * OFPP_NORMAL. */
    ofpbuf_init(&ofpacts, sizeof(struct ofpact_output));
    ofpact_put_OUTPUT(&ofpacts)->port = OFPP_NORMAL;

    match_init_catchall(&match);
    ofproto_add_flow(fo->ofproto, &match, FAIL_OPEN_PRIORITY,
                     ofpacts.data, ofpacts.size);

    ofpbuf_uninit(&ofpacts);
}

/* Enters fail-open mode if 'fo' has been disconnected long enough that it
 * should be in it; while fail-open is active and a controller is connected,
 * also schedules the periodic bogus packet-ins. */
void
fail_open_run(struct fail_open *fo)
{
    int disconn_secs = connmgr_failure_duration(fo->connmgr);

    /* Enter fail-open mode if 'fo' is not in it but should be. */
    if (disconn_secs >= trigger_duration(fo)) {
        if (!fail_open_is_active(fo)) {
            VLOG_WARN("Could not connect to controller (or switch failed "
                      "controller's post-connection admission control "
                      "policy) for %d seconds, failing open", disconn_secs);
            fo->last_disconn_secs = disconn_secs;

            /* Flush all OpenFlow and datapath flows.  We will set up our
             * fail-open rule from fail_open_flushed() when
             * ofproto_flush_flows() calls back to us. */
            ofproto_flush_flows(fo->ofproto);
        } else if (disconn_secs > fo->last_disconn_secs + 60) {
            VLOG_INFO("Still in fail-open mode after %d seconds disconnected "
                      "from controller", disconn_secs);
            fo->last_disconn_secs = disconn_secs;
        }
    }

    /* Schedule a bogus packet-in if we're connected and in fail-open. */
    if (fail_open_is_active(fo)) {
        if (connmgr_is_any_controller_connected(fo->connmgr)) {
            bool expired = time_msec() >= fo->next_bogus_packet_in;
            if (expired) {
                send_bogus_packet_ins(fo);
            }
            if (expired || fo->next_bogus_packet_in == LLONG_MAX) {
                fo->next_bogus_packet_in = time_msec() + 2000;
            }
        } else {
            fo->next_bogus_packet_in = LLONG_MAX;
        }
    }
}

/* If 'fo' is currently in fail-open mode and at least one controller has
 * admitted the switch, exits fail-open mode. */
void
fail_open_maybe_recover(struct fail_open *fo)
    OVS_EXCLUDED(ofproto_mutex)
{
    if (fail_open_is_active(fo)
        && connmgr_is_any_controller_admitted(fo->connmgr)) {
        ovs_mutex_lock(&ofproto_mutex);
        fail_open_recover(fo);
        ovs_mutex_unlock(&ofproto_mutex);
    }
}

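/* Exits fail-open mode: logs the transition, resets the disconnection state
 * and bogus packet-in timer, and removes the fail-open NORMAL flow. */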
static void
fail_open_recover(struct fail_open *fo)
    OVS_REQUIRES(ofproto_mutex)
{
    VLOG_WARN("No longer in fail-open mode");
    fo->last_disconn_secs = 0;
    fo->next_bogus_packet_in = LLONG_MAX;

    fail_open_del_normal_flow(fo);
}

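/* Arranges for the poll loop to wake up when fail_open_run() should next send
 * a bogus packet-in. */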
void
fail_open_wait(struct fail_open *fo)
{
    if (fo->next_bogus_packet_in != LLONG_MAX) {
        poll_timer_wait_until(fo->next_bogus_packet_in);
    }
}

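/* Called when the flow table has been flushed (see ofproto_flush_flows() in
 * fail_open_run()).  Reinstalls the fail-open NORMAL flow if the switch
 * should (still) be failing open, and records whether it is. */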
void
fail_open_flushed(struct fail_open *fo)
    OVS_EXCLUDED(ofproto_mutex)
{
    int disconn_secs = connmgr_failure_duration(fo->connmgr);
    bool open = disconn_secs >= trigger_duration(fo);
    if (open) {
        fail_open_add_normal_flow(fo);
    }
    fo->fail_open_active = open;
}

/* Returns the number of fail-open rules currently installed in the flow
 * table. */
int
fail_open_count_rules(const struct fail_open *fo)
{
    return fo->fail_open_active != 0;
}

/* Creates and returns a new struct fail_open for 'ofproto' and 'mgr'. */
struct fail_open *
fail_open_create(struct ofproto *ofproto, struct connmgr *mgr)
{
    struct fail_open *fo = xmalloc(sizeof *fo);
    fo->ofproto = ofproto;
    fo->connmgr = mgr;
    fo->last_disconn_secs = 0;
    fo->next_bogus_packet_in = LLONG_MAX;
    fo->bogus_packet_counter = rconn_packet_counter_create();
    fo->fail_open_active = false;
    return fo;
}

/* Destroys 'fo'. */
void
fail_open_destroy(struct fail_open *fo)
    OVS_REQUIRES(ofproto_mutex)
{
    if (fo) {
        if (fail_open_is_active(fo)) {
            fail_open_recover(fo);
        }
        /* We don't own fo->connmgr. */
        rconn_packet_counter_destroy(fo->bogus_packet_counter);
        free(fo);
    }
}