]> git.proxmox.com Git - ovs.git/blob - lib/learning-switch.c
c837fd0f549a061c306b0a2ddc81e448d00e444d
[ovs.git] / lib / learning-switch.c
1 /*
2 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "learning-switch.h"
19
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <netinet/in.h>
23 #include <stdlib.h>
24 #include <time.h>
25
26 #include "flow.h"
27 #include "mac-learning.h"
28 #include "ofpbuf.h"
29 #include "ofp-print.h"
30 #include "ofp-util.h"
31 #include "openflow/openflow.h"
32 #include "poll-loop.h"
33 #include "queue.h"
34 #include "rconn.h"
35 #include "stp.h"
36 #include "timeval.h"
37 #include "vconn.h"
38 #include "xtoxll.h"
39
40 #define THIS_MODULE VLM_learning_switch
41 #include "vlog.h"
42
/* Spanning tree port states, mirroring the 802.1D states reported via
 * OFPPS_STP_* in OpenFlow port status.  Kept as single-bit flags so that a
 * set of acceptable states can be tested with one bitwise AND (see
 * may_learn(), may_recv(), and may_send() below). */
enum port_state {
    P_DISABLED = 1 << 0,        /* Port not participating; drop everything. */
    P_LISTENING = 1 << 1,       /* STP listening: no learning, no traffic. */
    P_LEARNING = 1 << 2,        /* STP learning: learn MACs, no traffic. */
    P_FORWARDING = 1 << 3,      /* Normal operation: learn and forward. */
    P_BLOCKING = 1 << 4         /* STP blocking: drop everything. */
};
50
/* State for one learning switch instance, driven by a single OpenFlow
 * connection ('rconn' is passed into each entry point rather than stored). */
struct lswitch {
    /* If nonnegative, the switch sets up flows that expire after the given
     * number of seconds (or never expire, if the value is OFP_FLOW_PERMANENT).
     * Otherwise, the switch processes every packet. */
    int max_idle;

    unsigned long long int datapath_id; /* From OFPT_FEATURES_REPLY; 0 until
                                         * the reply arrives. */
    uint32_t capabilities;      /* OFPC_* bits from the features reply. */
    time_t last_features_request; /* Rate-limits features requests to 1/s. */
    struct mac_learning *ml;    /* NULL to act as hub instead of switch. */
    uint32_t wildcards;         /* Wildcards to apply to flows. */
    bool action_normal;         /* Use OFPP_NORMAL? */

    /* Number of outgoing queued packets on the rconn. */
    struct rconn_packet_counter *queued;

    /* Spanning tree protocol implementation.
     *
     * We implement STP states by, whenever a port's STP state changes,
     * querying all the flows on the switch and then deleting any of them that
     * are inappropriate for a port's STP state. */
    long long int next_query;   /* Next time at which to query all flows. */
    long long int last_query;   /* Last time we sent a query. */
    long long int last_reply;   /* Last time we received a query reply. */
    unsigned int port_states[STP_MAX_PORTS]; /* enum port_state per port. */
    uint32_t query_xid;         /* XID used for query. */
    int n_flows, n_no_recv, n_no_send; /* Per-query statistics for logging. */
};
79
80 /* The log messages here could actually be useful in debugging, so keep the
81 * rate limit relatively high. */
82 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
83
84 static void queue_tx(struct lswitch *, struct rconn *, struct ofpbuf *);
85 static void send_features_request(struct lswitch *, struct rconn *);
86 static void schedule_query(struct lswitch *, long long int delay);
87 static bool may_learn(const struct lswitch *, uint16_t port_no);
88 static bool may_recv(const struct lswitch *, uint16_t port_no,
89 bool any_actions);
90 static bool may_send(const struct lswitch *, uint16_t port_no);
91
92 typedef void packet_handler_func(struct lswitch *, struct rconn *, void *);
93 static packet_handler_func process_switch_features;
94 static packet_handler_func process_packet_in;
95 static packet_handler_func process_echo_request;
96 static packet_handler_func process_port_status;
97 static packet_handler_func process_phy_port;
98 static packet_handler_func process_stats_reply;
99
100 /* Creates and returns a new learning switch.
101 *
102 * If 'learn_macs' is true, the new switch will learn the ports on which MAC
103 * addresses appear. Otherwise, the new switch will flood all packets.
104 *
105 * If 'max_idle' is nonnegative, the new switch will set up flows that expire
106 * after the given number of seconds (or never expire, if 'max_idle' is
107 * OFP_FLOW_PERMANENT). Otherwise, the new switch will process every packet.
108 *
109 * 'rconn' is used to send out an OpenFlow features request. */
110 struct lswitch *
111 lswitch_create(struct rconn *rconn, bool learn_macs,
112 bool exact_flows, int max_idle, bool action_normal)
113 {
114 struct lswitch *sw;
115 size_t i;
116
117 sw = xzalloc(sizeof *sw);
118 sw->max_idle = max_idle;
119 sw->datapath_id = 0;
120 sw->last_features_request = time_now() - 1;
121 sw->ml = learn_macs ? mac_learning_create() : NULL;
122 sw->action_normal = action_normal;
123 if (exact_flows) {
124 /* Exact match. */
125 sw->wildcards = 0;
126 } else {
127 /* We cannot wildcard all fields.
128 * We need in_port to detect moves.
129 * We need both SA and DA to do learning. */
130 sw->wildcards = (OFPFW_DL_TYPE | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
131 | OFPFW_NW_PROTO | OFPFW_TP_SRC | OFPFW_TP_DST);
132 }
133 sw->queued = rconn_packet_counter_create();
134 sw->next_query = LLONG_MIN;
135 sw->last_query = LLONG_MIN;
136 sw->last_reply = LLONG_MIN;
137 for (i = 0; i < STP_MAX_PORTS; i++) {
138 sw->port_states[i] = P_DISABLED;
139 }
140 send_features_request(sw, rconn);
141 return sw;
142 }
143
144 /* Destroys 'sw'. */
145 void
146 lswitch_destroy(struct lswitch *sw)
147 {
148 if (sw) {
149 mac_learning_destroy(sw->ml);
150 rconn_packet_counter_destroy(sw->queued);
151 free(sw);
152 }
153 }
154
155 /* Takes care of necessary 'sw' activity, except for receiving packets (which
156 * the caller must do). */
157 void
158 lswitch_run(struct lswitch *sw, struct rconn *rconn)
159 {
160 long long int now = time_msec();
161
162 if (sw->ml) {
163 mac_learning_run(sw->ml, NULL);
164 }
165
166 /* If we're waiting for more replies, keeping waiting for up to 10 s. */
167 if (sw->last_reply != LLONG_MIN) {
168 if (now - sw->last_reply > 10000) {
169 VLOG_ERR_RL(&rl, "%016llx: No more flow stat replies last 10 s",
170 sw->datapath_id);
171 sw->last_reply = LLONG_MIN;
172 sw->last_query = LLONG_MIN;
173 schedule_query(sw, 0);
174 } else {
175 return;
176 }
177 }
178
179 /* If we're waiting for any reply at all, keep waiting for up to 10 s. */
180 if (sw->last_query != LLONG_MIN) {
181 if (now - sw->last_query > 10000) {
182 VLOG_ERR_RL(&rl, "%016llx: No flow stat replies in last 10 s",
183 sw->datapath_id);
184 sw->last_query = LLONG_MIN;
185 schedule_query(sw, 0);
186 } else {
187 return;
188 }
189 }
190
191 /* If it's time to send another query, do so. */
192 if (sw->next_query != LLONG_MIN && now >= sw->next_query) {
193 sw->next_query = LLONG_MIN;
194 if (!rconn_is_connected(rconn)) {
195 schedule_query(sw, 1000);
196 } else {
197 struct ofp_stats_request *osr;
198 struct ofp_flow_stats_request *ofsr;
199 struct ofpbuf *b;
200 int error;
201
202 VLOG_DBG("%016llx: Sending flow stats request to implement STP",
203 sw->datapath_id);
204
205 sw->last_query = now;
206 sw->query_xid = random_uint32();
207 sw->n_flows = 0;
208 sw->n_no_recv = 0;
209 sw->n_no_send = 0;
210 osr = make_openflow_xid(sizeof *osr + sizeof *ofsr,
211 OFPT_STATS_REQUEST, sw->query_xid, &b);
212 osr->type = htons(OFPST_FLOW);
213 osr->flags = htons(0);
214 ofsr = (struct ofp_flow_stats_request *) osr->body;
215 ofsr->match.wildcards = htonl(OFPFW_ALL);
216 ofsr->table_id = 0xff;
217 ofsr->out_port = htons(OFPP_NONE);
218
219 error = rconn_send(rconn, b, NULL);
220 if (error) {
221 VLOG_WARN_RL(&rl, "%016llx: sending flow stats request "
222 "failed: %s", sw->datapath_id, strerror(error));
223 ofpbuf_delete(b);
224 schedule_query(sw, 1000);
225 }
226 }
227 }
228 }
229
/* Arranges to wake up the poll loop 10 s after 'started', a time_msec()
 * timestamp, which is when a pending flow stats query times out. */
static void
wait_timeout(long long int started)
{
    long long int deadline = started + 10000;
    poll_timer_wait_until(deadline);
}
235
236 void
237 lswitch_wait(struct lswitch *sw)
238 {
239 if (sw->ml) {
240 mac_learning_wait(sw->ml);
241 }
242
243 if (sw->last_reply != LLONG_MIN) {
244 wait_timeout(sw->last_reply);
245 } else if (sw->last_query != LLONG_MIN) {
246 wait_timeout(sw->last_query);
247 }
248 }
249
/* Processes 'msg', which should be an OpenFlow received on 'rconn', according
 * to the learning switch state in 'sw'.  The most likely result of processing
 * is that flow-setup and packet-out OpenFlow messages will be sent out on
 * 'rconn'.
 *
 * Messages shorter than the handler's declared minimum size are logged and
 * dropped; message types with no table entry are logged at DBG level only. */
void
lswitch_process_packet(struct lswitch *sw, struct rconn *rconn,
                       const struct ofpbuf *msg)
{
    /* Dispatch table entry: minimum valid length plus handler (a NULL
     * handler means "recognized but deliberately ignored"). */
    struct processor {
        uint8_t type;
        size_t min_size;
        packet_handler_func *handler;
    };
    static const struct processor processors[] = {
        {
            OFPT_ECHO_REQUEST,
            sizeof(struct ofp_header),
            process_echo_request
        },
        {
            OFPT_FEATURES_REPLY,
            sizeof(struct ofp_switch_features),
            process_switch_features
        },
        {
            OFPT_PACKET_IN,
            offsetof(struct ofp_packet_in, data),
            process_packet_in
        },
        {
            OFPT_PORT_STATUS,
            sizeof(struct ofp_port_status),
            process_port_status
        },
        {
            OFPT_STATS_REPLY,
            offsetof(struct ofp_stats_reply, body),
            process_stats_reply
        },
        {
            OFPT_FLOW_REMOVED,
            sizeof(struct ofp_flow_removed),
            NULL                /* Recognized but ignored. */
        },
    };
    const size_t n_processors = ARRAY_SIZE(processors);
    const struct processor *p;
    struct ofp_header *oh;

    oh = msg->data;
    /* Until the features reply arrives we don't know the datapath ID, so
     * keep (re-rate-limited) features requests going instead of processing
     * anything but echoes. */
    if (sw->datapath_id == 0
        && oh->type != OFPT_ECHO_REQUEST
        && oh->type != OFPT_FEATURES_REPLY) {
        send_features_request(sw, rconn);
        return;
    }

    for (p = processors; p < &processors[n_processors]; p++) {
        if (oh->type == p->type) {
            if (msg->size < p->min_size) {
                VLOG_WARN_RL(&rl, "%016llx: %s: too short (%zu bytes) for "
                             "type %"PRIu8" (min %zu)", sw->datapath_id,
                             rconn_get_name(rconn), msg->size, oh->type,
                             p->min_size);
                return;
            }
            if (p->handler) {
                (p->handler)(sw, rconn, msg->data);
            }
            return;
        }
    }
    /* Unknown type: dump it for debugging but otherwise ignore it. */
    if (VLOG_IS_DBG_ENABLED()) {
        char *p = ofp_to_string(msg->data, msg->size, 2);
        VLOG_DBG_RL(&rl, "%016llx: OpenFlow packet ignored: %s",
                    sw->datapath_id, p);
        free(p);
    }
}
329 \f
330 static void
331 send_features_request(struct lswitch *sw, struct rconn *rconn)
332 {
333 time_t now = time_now();
334 if (now >= sw->last_features_request + 1) {
335 struct ofpbuf *b;
336 struct ofp_switch_config *osc;
337
338 /* Send OFPT_FEATURES_REQUEST. */
339 make_openflow(sizeof(struct ofp_header), OFPT_FEATURES_REQUEST, &b);
340 queue_tx(sw, rconn, b);
341
342 /* Send OFPT_SET_CONFIG. */
343 osc = make_openflow(sizeof *osc, OFPT_SET_CONFIG, &b);
344 osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
345 queue_tx(sw, rconn, b);
346
347 sw->last_features_request = now;
348 }
349 }
350
351 static void
352 queue_tx(struct lswitch *sw, struct rconn *rconn, struct ofpbuf *b)
353 {
354 int retval = rconn_send_with_limit(rconn, b, sw->queued, 10);
355 if (retval && retval != ENOTCONN) {
356 if (retval == EAGAIN) {
357 VLOG_INFO_RL(&rl, "%016llx: %s: tx queue overflow",
358 sw->datapath_id, rconn_get_name(rconn));
359 } else {
360 VLOG_WARN_RL(&rl, "%016llx: %s: send: %s",
361 sw->datapath_id, rconn_get_name(rconn),
362 strerror(retval));
363 }
364 }
365 }
366
367 static void
368 schedule_query(struct lswitch *sw, long long int delay)
369 {
370 long long int now = time_msec();
371 if (sw->next_query == LLONG_MIN || sw->next_query > now + delay) {
372 sw->next_query = now + delay;
373 }
374 }
375
376 static void
377 process_switch_features(struct lswitch *sw, struct rconn *rconn, void *osf_)
378 {
379 struct ofp_switch_features *osf = osf_;
380 size_t n_ports = ((ntohs(osf->header.length)
381 - offsetof(struct ofp_switch_features, ports))
382 / sizeof *osf->ports);
383 size_t i;
384
385 sw->datapath_id = ntohll(osf->datapath_id);
386 sw->capabilities = ntohl(osf->capabilities);
387 for (i = 0; i < n_ports; i++) {
388 process_phy_port(sw, rconn, &osf->ports[i]);
389 }
390 if (sw->capabilities & OFPC_STP) {
391 schedule_query(sw, 1000);
392 }
393 }
394
/* Handles an OFPT_PACKET_IN in 'opi_': learns the source MAC, decides an
 * output port (a learned port, OFPP_FLOOD, or OFPP_NORMAL), and queues a
 * flow-mod and/or packet-out on 'rconn'.  Frames destined to reserved
 * multicast addresses, or arriving on STP-blocked ports, are dropped. */
static void
process_packet_in(struct lswitch *sw, struct rconn *rconn, void *opi_)
{
    struct ofp_packet_in *opi = opi_;
    uint16_t in_port = ntohs(opi->in_port);
    uint16_t out_port = OFPP_FLOOD;     /* Default until a MAC lookup hits. */

    size_t pkt_ofs, pkt_len;
    struct ofpbuf pkt;
    flow_t flow;

    /* Extract flow data from 'opi' into 'flow'.  'pkt' borrows the packet
     * bytes embedded in 'opi'; nothing is copied. */
    pkt_ofs = offsetof(struct ofp_packet_in, data);
    pkt_len = ntohs(opi->header.length) - pkt_ofs;
    pkt.data = opi->data;
    pkt.size = pkt_len;
    flow_extract(&pkt, 0, in_port, &flow);

    /* Learn the source MAC, if STP state permits learning on this port. */
    if (may_learn(sw, in_port) && sw->ml) {
        if (mac_learning_learn(sw->ml, flow.dl_src, 0, in_port,
                               GRAT_ARP_LOCK_NONE)) {
            VLOG_DBG_RL(&rl, "%016llx: learned that "ETH_ADDR_FMT" is on "
                        "port %"PRIu16, sw->datapath_id,
                        ETH_ADDR_ARGS(flow.dl_src), in_port);
        }
    }

    /* Drop frames for reserved multicast addresses. */
    if (eth_addr_is_reserved(flow.dl_dst)) {
        goto drop_it;
    }

    if (!may_recv(sw, in_port, false)) {
        /* STP prevents receiving anything on this port. */
        goto drop_it;
    }

    /* Look up the destination MAC; only use the learned port if STP allows
     * sending on it. */
    if (sw->ml) {
        int learned_port = mac_learning_lookup(sw->ml, flow.dl_dst, 0, NULL);
        if (learned_port >= 0 && may_send(sw, learned_port)) {
            out_port = learned_port;
        }
    }

    if (in_port == out_port) {
        /* Don't send out packets on their input ports. */
        goto drop_it;
    } else if (sw->max_idle >= 0 && (!sw->ml || out_port != OFPP_FLOOD)) {
        struct ofpbuf *buffer;
        struct ofp_flow_mod *ofm;

        /* Check if we need to use "NORMAL" action. */
        if (sw->action_normal && out_port != OFPP_FLOOD) {
            out_port = OFPP_NORMAL;
        }

        /* The output port is known, or we always flood everything, so add a
         * new flow. */
        buffer = make_add_simple_flow(&flow, ntohl(opi->buffer_id),
                                      out_port, sw->max_idle);
        ofm = buffer->data;
        ofm->match.wildcards = htonl(sw->wildcards);
        queue_tx(sw, rconn, buffer);

        /* If the switch didn't buffer the packet, we need to send a copy. */
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            queue_tx(sw, rconn,
                     make_unbuffered_packet_out(&pkt, in_port, out_port));
        }
    } else {
        struct ofpbuf *b;

        /* Check if we need to use "NORMAL" action. */
        if (sw->action_normal && out_port != OFPP_FLOOD) {
            out_port = OFPP_NORMAL;
        }

        /* We don't know that MAC, or we don't set up flows.  Send along the
         * packet without setting up a flow. */
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            b = make_unbuffered_packet_out(&pkt, in_port, out_port);
        } else {
            b = make_buffered_packet_out(ntohl(opi->buffer_id),
                                         in_port, out_port);
        }
        queue_tx(sw, rconn, b);
    }
    return;

drop_it:
    if (sw->max_idle >= 0) {
        /* Set up a flow to drop packets (a flow-mod with no actions). */
        queue_tx(sw, rconn, make_add_flow(&flow, ntohl(opi->buffer_id),
                                          sw->max_idle, 0));
    } else {
        /* Just drop the packet, since we don't set up flows at all.
         * XXX we should send a packet_out with no actions if buffer_id !=
         * UINT32_MAX, to avoid clogging the kernel buffers. */
    }
    return;
}
496
/* Handles an OFPT_ECHO_REQUEST by queueing the matching echo reply, so the
 * peer can tell the connection is alive. */
static void
process_echo_request(struct lswitch *sw, struct rconn *rconn, void *rq_)
{
    struct ofp_header *request = rq_;

    queue_tx(sw, rconn, make_echo_reply(request));
}
503
504 static void
505 process_port_status(struct lswitch *sw, struct rconn *rconn, void *ops_)
506 {
507 struct ofp_port_status *ops = ops_;
508 process_phy_port(sw, rconn, &ops->desc);
509 }
510
/* Translates the OpenFlow port description in 'opp_' into our port_state
 * bitmask and stores it in sw->port_states[].  If the state changed,
 * schedules a flow-table audit so flows inconsistent with the new STP state
 * get deleted.  Ignored unless the switch advertises OFPC_STP and the port
 * number is within STP_MAX_PORTS. */
static void
process_phy_port(struct lswitch *sw, struct rconn *rconn OVS_UNUSED,
                 void *opp_)
{
    const struct ofp_phy_port *opp = opp_;
    uint16_t port_no = ntohs(opp->port_no);
    if (sw->capabilities & OFPC_STP && port_no < STP_MAX_PORTS) {
        uint32_t config = ntohl(opp->config);
        uint32_t state = ntohl(opp->state);
        unsigned int *port_state = &sw->port_states[port_no];
        unsigned int new_port_state;

        /* STP state only applies to ports that participate in STP and whose
         * link is up; everything else is treated as plain forwarding. */
        if (!(config & (OFPPC_NO_STP | OFPPC_PORT_DOWN))
            && !(state & OFPPS_LINK_DOWN))
        {
            switch (state & OFPPS_STP_MASK) {
            case OFPPS_STP_LISTEN:
                new_port_state = P_LISTENING;
                break;
            case OFPPS_STP_LEARN:
                new_port_state = P_LEARNING;
                break;
            case OFPPS_STP_FORWARD:
                new_port_state = P_FORWARDING;
                break;
            case OFPPS_STP_BLOCK:
                new_port_state = P_BLOCKING;
                break;
            default:
                new_port_state = P_DISABLED;
                break;
            }
        } else {
            new_port_state = P_FORWARDING;
        }
        if (*port_state != new_port_state) {
            *port_state = new_port_state;
            schedule_query(sw, 1000);
        }
    }
}
552
553 static unsigned int
554 get_port_state(const struct lswitch *sw, uint16_t port_no)
555 {
556 return (port_no >= STP_MAX_PORTS || !(sw->capabilities & OFPC_STP)
557 ? P_FORWARDING
558 : sw->port_states[port_no]);
559 }
560
561 static bool
562 may_learn(const struct lswitch *sw, uint16_t port_no)
563 {
564 return get_port_state(sw, port_no) & (P_LEARNING | P_FORWARDING);
565 }
566
567 static bool
568 may_recv(const struct lswitch *sw, uint16_t port_no, bool any_actions)
569 {
570 unsigned int state = get_port_state(sw, port_no);
571 return !(any_actions
572 ? state & (P_DISABLED | P_LISTENING | P_BLOCKING)
573 : state & (P_DISABLED | P_LISTENING | P_BLOCKING | P_LEARNING));
574 }
575
576 static bool
577 may_send(const struct lswitch *sw, uint16_t port_no)
578 {
579 return get_port_state(sw, port_no) & P_FORWARDING;
580 }
581
/* Examines one flow stats entry 'ofs' and, if the flow is incompatible with
 * the current STP port states (it receives on a port that may not receive,
 * or outputs to a port that may not send), queues a strict delete for it.
 * Updates sw->n_no_recv / sw->n_no_send for the end-of-query log message. */
static void
process_flow_stats(struct lswitch *sw, struct rconn *rconn,
                   const struct ofp_flow_stats *ofs)
{
    const char *end = (char *) ofs + ntohs(ofs->length);
    bool delete = false;

    /* Decide to delete the flow if it matches on an STP-disabled physical
     * port.  But don't delete it if the flow just drops all received packets,
     * because that's a perfectly reasonable thing to do for disabled physical
     * ports. */
    if (!(ofs->match.wildcards & htonl(OFPFW_IN_PORT))) {
        /* 'end > actions' means the flow has at least one action. */
        if (!may_recv(sw, ntohs(ofs->match.in_port),
                      end > (char *) ofs->actions)) {
            delete = true;
            sw->n_no_recv++;
        }
    }

    /* Decide to delete the flow if it forwards to an STP-disabled physical
     * port. */
    if (!delete) {
        const struct ofp_action_header *a;
        size_t len;

        /* Actions are 8-byte aligned, and sizeof *a is 8, so 'a += len / 8'
         * advances by 'len' bytes. */
        for (a = ofs->actions; (char *) a < end; a += len / 8) {
            len = ntohs(a->len);
            if (len > end - (char *) a) {
                VLOG_DBG_RL(&rl, "%016llx: action exceeds available space "
                            "(%zu > %td)",
                            sw->datapath_id, len, end - (char *) a);
                break;
            } else if (len % 8) {
                VLOG_DBG_RL(&rl, "%016llx: action length (%zu) not multiple "
                            "of 8 bytes", sw->datapath_id, len);
                break;
            }

            if (a->type == htons(OFPAT_OUTPUT)) {
                struct ofp_action_output *oao = (struct ofp_action_output *) a;
                if (!may_send(sw, ntohs(oao->port))) {
                    delete = true;
                    sw->n_no_send++;
                    break;
                }
            }
        }
    }

    /* Delete the flow. */
    if (delete) {
        struct ofp_flow_mod *ofm;
        struct ofpbuf *b;

        ofm = make_openflow(offsetof(struct ofp_flow_mod, actions),
                            OFPT_FLOW_MOD, &b);
        ofm->match = ofs->match;
        ofm->command = OFPFC_DELETE_STRICT;
        rconn_send(rconn, b, NULL);
    }
}
643
/* Handles an OFPT_STATS_REPLY: if it is a flow stats reply matching our
 * outstanding query XID, runs each flow through process_flow_stats() and,
 * when the final fragment arrives (OFPSF_REPLY_MORE clear), logs a summary
 * and ends the query.  Otherwise keeps waiting for more fragments. */
static void
process_stats_reply(struct lswitch *sw, struct rconn *rconn, void *osr_)
{
    struct ofp_stats_reply *osr = osr_;
    struct flow_stats_iterator i;
    const struct ofp_flow_stats *fs;

    /* Ignore replies when no query is outstanding or the XID doesn't match.
     * NOTE(review): 'header.xid' is compared to 'query_xid' without byte
     * swapping; presumably make_openflow_xid() stored it in the same
     * representation -- confirm against ofp-util. */
    if (sw->last_query == LLONG_MIN
        || osr->type != htons(OFPST_FLOW)
        || osr->header.xid != sw->query_xid) {
        return;
    }
    for (fs = flow_stats_first(&i, osr); fs; fs = flow_stats_next(&i)) {
        sw->n_flows++;
        process_flow_stats(sw, rconn, fs);
    }
    if (!(osr->flags & htons(OFPSF_REPLY_MORE))) {
        VLOG_DBG("%016llx: Deleted %d of %d received flows to "
                 "implement STP, %d because of no-recv, %d because of "
                 "no-send", sw->datapath_id,
                 sw->n_no_recv + sw->n_no_send, sw->n_flows,
                 sw->n_no_recv, sw->n_no_send);
        sw->last_query = LLONG_MIN;
        sw->last_reply = LLONG_MIN;
    } else {
        /* More fragments coming; remember when we last heard from the
         * switch so lswitch_run() can time the query out. */
        sw->last_reply = time_msec();
    }
}
672