lib/learning-switch.c
/*
 * Copyright (c) 2008, 2009 Nicira Networks.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <config.h>
#include "learning-switch.h"

#include <errno.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <time.h>

#include "flow.h"
#include "mac-learning.h"
#include "ofpbuf.h"
#include "ofp-print.h"
#include "openflow/openflow.h"
#include "poll-loop.h"
#include "queue.h"
#include "rconn.h"
#include "stp.h"
#include "timeval.h"
#include "vconn.h"
#include "xtoxll.h"

#define THIS_MODULE VLM_learning_switch
#include "vlog.h"
enum port_state {
    P_DISABLED = 1 << 0,
    P_LISTENING = 1 << 1,
    P_LEARNING = 1 << 2,
    P_FORWARDING = 1 << 3,
    P_BLOCKING = 1 << 4
};

struct lswitch {
    /* If nonnegative, the switch sets up flows that expire after the given
     * number of seconds (or never expire, if the value is OFP_FLOW_PERMANENT).
     * Otherwise, the switch processes every packet. */
    int max_idle;

    unsigned long long int datapath_id;
    uint32_t capabilities;
    time_t last_features_request;
    struct mac_learning *ml;    /* NULL to act as hub instead of switch. */

    /* Number of outgoing queued packets on the rconn. */
    struct rconn_packet_counter *queued;

    /* Spanning tree protocol implementation.
     *
     * We implement STP by querying all the flows on the switch whenever a
     * port's STP state changes, then deleting any of them that are
     * inappropriate for the port's new state.  (The lifecycle of that query
     * is sketched in the comment following this structure.) */
    long long int next_query;   /* Next time at which to query all flows. */
    long long int last_query;   /* Last time we sent a query. */
    long long int last_reply;   /* Last time we received a query reply. */
    unsigned int port_states[STP_MAX_PORTS];
    uint32_t query_xid;         /* XID used for query. */
    int n_flows, n_no_recv, n_no_send;
};

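/* How the flow-stats query above is driven (see lswitch_run() and
 * process_stats_reply()): 'next_query' is the time at which the next query
 * should be sent, or LLONG_MIN if none is scheduled; 'last_query' is set when
 * a query goes out and reset to LLONG_MIN when the final reply arrives or the
 * query times out; 'last_reply' records the most recent partial reply, so
 * that a switch that stalls partway through a multi-part reply is also timed
 * out. */
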
/* The log messages here could actually be useful in debugging, so keep the
 * rate limit relatively high. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);

static void queue_tx(struct lswitch *, struct rconn *, struct ofpbuf *);
static void send_features_request(struct lswitch *, struct rconn *);
static void schedule_query(struct lswitch *, long long int delay);
static bool may_learn(const struct lswitch *, uint16_t port_no);
static bool may_recv(const struct lswitch *, uint16_t port_no,
                     bool any_actions);
static bool may_send(const struct lswitch *, uint16_t port_no);

typedef void packet_handler_func(struct lswitch *, struct rconn *, void *);
static packet_handler_func process_switch_features;
static packet_handler_func process_packet_in;
static packet_handler_func process_echo_request;
static packet_handler_func process_port_status;
static packet_handler_func process_phy_port;
static packet_handler_func process_stats_reply;

/* Creates and returns a new learning switch.
 *
 * If 'learn_macs' is true, the new switch will learn the ports on which MAC
 * addresses appear.  Otherwise, the new switch will flood all packets.
 *
 * If 'max_idle' is nonnegative, the new switch will set up flows that expire
 * after the given number of seconds (or never expire, if 'max_idle' is
 * OFP_FLOW_PERMANENT).  Otherwise, the new switch will process every packet.
 *
 * 'rconn' is used to send out an OpenFlow features request. */
struct lswitch *
lswitch_create(struct rconn *rconn, bool learn_macs, int max_idle)
{
    struct lswitch *sw;
    size_t i;

    sw = xcalloc(1, sizeof *sw);
    sw->max_idle = max_idle;
    sw->datapath_id = 0;
    sw->last_features_request = time_now() - 1;
    sw->ml = learn_macs ? mac_learning_create() : NULL;
    sw->queued = rconn_packet_counter_create();
    sw->next_query = LLONG_MIN;
    sw->last_query = LLONG_MIN;
    sw->last_reply = LLONG_MIN;
    for (i = 0; i < STP_MAX_PORTS; i++) {
        sw->port_states[i] = P_DISABLED;
    }
    send_features_request(sw, rconn);
    return sw;
}
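
/* Example of driving a learning switch from a controller's main loop.  This
 * is a minimal sketch, not code from this tree: the lswitch_*() calls are the
 * ones defined in this file, while rconn_run(), rconn_recv(),
 * rconn_recv_wait(), and poll_block() are assumed to be the usual
 * lib/rconn.h and lib/poll-loop.h entry points, 'rconn' is assumed to have
 * been created and connected by the caller, and 'exiting' is a caller-defined
 * exit condition.
 *
 *     struct lswitch *sw = lswitch_create(rconn, true, 60);
 *     while (!exiting) {
 *         struct ofpbuf *msg;
 *
 *         rconn_run(rconn);
 *         lswitch_run(sw, rconn);
 *         while ((msg = rconn_recv(rconn)) != NULL) {
 *             lswitch_process_packet(sw, rconn, msg);
 *             ofpbuf_delete(msg);
 *         }
 *
 *         rconn_recv_wait(rconn);
 *         lswitch_wait(sw);
 *         poll_block();
 *     }
 *     lswitch_destroy(sw);
 */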

/* Destroys 'sw'. */
void
lswitch_destroy(struct lswitch *sw)
{
    if (sw) {
        mac_learning_destroy(sw->ml);
        rconn_packet_counter_destroy(sw->queued);
        free(sw);
    }
}

/* Takes care of necessary 'sw' activity, except for receiving packets (which
 * the caller must do). */
void
lswitch_run(struct lswitch *sw, struct rconn *rconn)
{
    long long int now = time_msec();

    if (sw->ml) {
        mac_learning_run(sw->ml, NULL);
    }

    /* If we're waiting for more replies, keep waiting for up to 10 s. */
    if (sw->last_reply != LLONG_MIN) {
        if (now - sw->last_reply > 10000) {
            VLOG_ERR_RL(&rl, "%012llx: No more flow stat replies in last 10 s",
                        sw->datapath_id);
            sw->last_reply = LLONG_MIN;
            sw->last_query = LLONG_MIN;
            schedule_query(sw, 0);
        } else {
            return;
        }
    }

    /* If we're waiting for any reply at all, keep waiting for up to 10 s. */
    if (sw->last_query != LLONG_MIN) {
        if (now - sw->last_query > 10000) {
            VLOG_ERR_RL(&rl, "%012llx: No flow stat replies in last 10 s",
                        sw->datapath_id);
            sw->last_query = LLONG_MIN;
            schedule_query(sw, 0);
        } else {
            return;
        }
    }

    /* If it's time to send another query, do so. */
    if (sw->next_query != LLONG_MIN && now >= sw->next_query) {
        sw->next_query = LLONG_MIN;
        if (!rconn_is_connected(rconn)) {
            schedule_query(sw, 1000);
        } else {
            struct ofp_stats_request *osr;
            struct ofp_flow_stats_request *ofsr;
            struct ofpbuf *b;
            int error;

            VLOG_DBG("%012llx: Sending flow stats request to implement STP",
                     sw->datapath_id);

            sw->last_query = now;
            sw->query_xid = random_uint32();
            sw->n_flows = 0;
            sw->n_no_recv = 0;
            sw->n_no_send = 0;
            osr = make_openflow_xid(sizeof *osr + sizeof *ofsr,
                                    OFPT_STATS_REQUEST, sw->query_xid, &b);
            osr->type = htons(OFPST_FLOW);
            osr->flags = htons(0);
            ofsr = (struct ofp_flow_stats_request *) osr->body;
            ofsr->match.wildcards = htonl(OFPFW_ALL);
            ofsr->table_id = 0xff;
            ofsr->out_port = htons(OFPP_NONE);

            error = rconn_send(rconn, b, NULL);
            if (error) {
                VLOG_WARN_RL(&rl, "%012llx: sending flow stats request "
                             "failed: %s", sw->datapath_id, strerror(error));
                ofpbuf_delete(b);
                schedule_query(sw, 1000);
            }
        }
    }
}

static void
wait_timeout(long long int started)
{
    long long int now = time_msec();
    long long int timeout = 10000 - (now - started);
    if (timeout <= 0) {
        poll_immediate_wake();
    } else {
        poll_timer_wait(timeout);
    }
}

void
lswitch_wait(struct lswitch *sw)
{
    if (sw->ml) {
        mac_learning_wait(sw->ml);
    }

    if (sw->last_reply != LLONG_MIN) {
        wait_timeout(sw->last_reply);
    } else if (sw->last_query != LLONG_MIN) {
        wait_timeout(sw->last_query);
    }
}

/* Processes 'msg', which should be an OpenFlow message received on 'rconn',
 * according to the learning switch state in 'sw'.  The most likely result of
 * processing is that flow-setup and packet-out OpenFlow messages will be sent
 * out on 'rconn'. */
void
lswitch_process_packet(struct lswitch *sw, struct rconn *rconn,
                       const struct ofpbuf *msg)
{
    struct processor {
        uint8_t type;
        size_t min_size;
        packet_handler_func *handler;
    };
    static const struct processor processors[] = {
        {
            OFPT_ECHO_REQUEST,
            sizeof(struct ofp_header),
            process_echo_request
        },
        {
            OFPT_FEATURES_REPLY,
            sizeof(struct ofp_switch_features),
            process_switch_features
        },
        {
            OFPT_PACKET_IN,
            offsetof(struct ofp_packet_in, data),
            process_packet_in
        },
        {
            OFPT_PORT_STATUS,
            sizeof(struct ofp_port_status),
            process_port_status
        },
        {
            OFPT_STATS_REPLY,
            offsetof(struct ofp_stats_reply, body),
            process_stats_reply
        },
        {
            OFPT_FLOW_EXPIRED,
            sizeof(struct ofp_flow_expired),
            NULL
        },
    };
    const size_t n_processors = ARRAY_SIZE(processors);
    const struct processor *p;
    struct ofp_header *oh;

    oh = msg->data;
    if (sw->datapath_id == 0
        && oh->type != OFPT_ECHO_REQUEST
        && oh->type != OFPT_FEATURES_REPLY) {
        send_features_request(sw, rconn);
        return;
    }

    for (p = processors; p < &processors[n_processors]; p++) {
        if (oh->type == p->type) {
            if (msg->size < p->min_size) {
                VLOG_WARN_RL(&rl, "%012llx: %s: too short (%zu bytes) for "
                             "type %"PRIu8" (min %zu)", sw->datapath_id,
                             rconn_get_name(rconn), msg->size, oh->type,
                             p->min_size);
                return;
            }
            if (p->handler) {
                (p->handler)(sw, rconn, msg->data);
            }
            return;
        }
    }
    if (VLOG_IS_DBG_ENABLED()) {
        char *p = ofp_to_string(msg->data, msg->size, 2);
        VLOG_DBG_RL(&rl, "%012llx: OpenFlow packet ignored: %s",
                    sw->datapath_id, p);
        free(p);
    }
}

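/* Sends an OFPT_FEATURES_REQUEST to the switch, plus an OFPT_SET_CONFIG that
 * asks it to send flow-expired messages and up to OFP_DEFAULT_MISS_SEND_LEN
 * bytes of each packet that misses the flow table, at most once per second no
 * matter how often this is called. */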
static void
send_features_request(struct lswitch *sw, struct rconn *rconn)
{
    time_t now = time_now();
    if (now >= sw->last_features_request + 1) {
        struct ofpbuf *b;
        struct ofp_switch_config *osc;

        /* Send OFPT_FEATURES_REQUEST. */
        make_openflow(sizeof(struct ofp_header), OFPT_FEATURES_REQUEST, &b);
        queue_tx(sw, rconn, b);

        /* Send OFPT_SET_CONFIG. */
        osc = make_openflow(sizeof *osc, OFPT_SET_CONFIG, &b);
        osc->flags = htons(OFPC_SEND_FLOW_EXP);
        osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
        queue_tx(sw, rconn, b);

        sw->last_features_request = now;
    }
}

static void
queue_tx(struct lswitch *sw, struct rconn *rconn, struct ofpbuf *b)
{
    int retval = rconn_send_with_limit(rconn, b, sw->queued, 10);
    if (retval && retval != ENOTCONN) {
        if (retval == EAGAIN) {
            VLOG_INFO_RL(&rl, "%012llx: %s: tx queue overflow",
                         sw->datapath_id, rconn_get_name(rconn));
        } else {
            VLOG_WARN_RL(&rl, "%012llx: %s: send: %s",
                         sw->datapath_id, rconn_get_name(rconn),
                         strerror(retval));
        }
    }
}

static void
schedule_query(struct lswitch *sw, long long int delay)
{
    long long int now = time_msec();
    if (sw->next_query == LLONG_MIN || sw->next_query > now + delay) {
        sw->next_query = now + delay;
    }
}

static void
process_switch_features(struct lswitch *sw, struct rconn *rconn, void *osf_)
{
    struct ofp_switch_features *osf = osf_;
    size_t n_ports = ((ntohs(osf->header.length)
                       - offsetof(struct ofp_switch_features, ports))
                      / sizeof *osf->ports);
    size_t i;

    sw->datapath_id = ntohll(osf->datapath_id);
    sw->capabilities = ntohl(osf->capabilities);
    for (i = 0; i < n_ports; i++) {
        process_phy_port(sw, rconn, &osf->ports[i]);
    }
    if (sw->capabilities & OFPC_STP) {
        schedule_query(sw, 1000);
    }
}

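/* Handles an OFPT_PACKET_IN message: learns the source MAC's ingress port
 * (subject to STP), looks up an output port for the destination MAC (falling
 * back to OFPP_FLOOD), and then either installs a flow that expires after
 * 'max_idle' seconds or just sends a packet-out, depending on whether flow
 * setup is enabled and the output port is known.  Packets with reserved
 * source addresses, or arriving on ports where STP forbids receiving, get a
 * drop flow (or are simply ignored when flow setup is disabled). */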
static void
process_packet_in(struct lswitch *sw, struct rconn *rconn, void *opi_)
{
    struct ofp_packet_in *opi = opi_;
    uint16_t in_port = ntohs(opi->in_port);
    uint16_t out_port = OFPP_FLOOD;

    size_t pkt_ofs, pkt_len;
    struct ofpbuf pkt;
    flow_t flow;

    /* Extract flow data from 'opi' into 'flow'. */
    pkt_ofs = offsetof(struct ofp_packet_in, data);
    pkt_len = ntohs(opi->header.length) - pkt_ofs;
    pkt.data = opi->data;
    pkt.size = pkt_len;
    flow_extract(&pkt, in_port, &flow);

    if (may_learn(sw, in_port) && sw->ml) {
        if (mac_learning_learn(sw->ml, flow.dl_src, 0, in_port)) {
            VLOG_DBG_RL(&rl, "%012llx: learned that "ETH_ADDR_FMT" is on "
                        "port %"PRIu16, sw->datapath_id,
                        ETH_ADDR_ARGS(flow.dl_src), in_port);
        }
    }

    if (eth_addr_is_reserved(flow.dl_src)) {
        goto drop_it;
    }

    if (!may_recv(sw, in_port, false)) {
        /* STP prevents receiving anything on this port. */
        goto drop_it;
    }

    if (sw->ml) {
        int learned_port = mac_learning_lookup(sw->ml, flow.dl_dst, 0);
        if (learned_port >= 0 && may_send(sw, learned_port)) {
            out_port = learned_port;
        }
    }

    if (in_port == out_port) {
        /* Don't send out packets on their input ports. */
        goto drop_it;
    } else if (sw->max_idle >= 0 && (!sw->ml || out_port != OFPP_FLOOD)) {
        /* The output port is known, or we always flood everything, so add a
         * new flow. */
        queue_tx(sw, rconn, make_add_simple_flow(&flow, ntohl(opi->buffer_id),
                                                 out_port, sw->max_idle));

        /* If the switch didn't buffer the packet, we need to send a copy. */
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            queue_tx(sw, rconn,
                     make_unbuffered_packet_out(&pkt, in_port, out_port));
        }
    } else {
        /* We don't know that MAC, or we don't set up flows.  Send along the
         * packet without setting up a flow. */
        struct ofpbuf *b;
        if (ntohl(opi->buffer_id) == UINT32_MAX) {
            b = make_unbuffered_packet_out(&pkt, in_port, out_port);
        } else {
            b = make_buffered_packet_out(ntohl(opi->buffer_id),
                                         in_port, out_port);
        }
        queue_tx(sw, rconn, b);
    }
    return;

drop_it:
    if (sw->max_idle >= 0) {
        /* Set up a flow to drop packets. */
        queue_tx(sw, rconn, make_add_flow(&flow, ntohl(opi->buffer_id),
                                          sw->max_idle, 0));
    } else {
        /* Just drop the packet, since we don't set up flows at all.
         * XXX we should send a packet_out with no actions if buffer_id !=
         * UINT32_MAX, to avoid clogging the kernel buffers. */
    }
    return;
}

static void
process_echo_request(struct lswitch *sw, struct rconn *rconn, void *rq_)
{
    struct ofp_header *rq = rq_;
    queue_tx(sw, rconn, make_echo_reply(rq));
}

static void
process_port_status(struct lswitch *sw, struct rconn *rconn, void *ops_)
{
    struct ofp_port_status *ops = ops_;
    process_phy_port(sw, rconn, &ops->desc);
}

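/* Records the STP state of the physical port described by 'opp_'.  Only ports
 * numbered below STP_MAX_PORTS on switches that advertise OFPC_STP are
 * tracked.  Ports with STP disabled, administratively down, or with the link
 * down are treated as forwarding, that is, unrestricted; otherwise the
 * OpenFlow OFPPS_STP_* state maps to the corresponding P_* state.  Any change
 * schedules a flow-stats query so that flows that have become inappropriate
 * can be deleted. */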
static void
process_phy_port(struct lswitch *sw, struct rconn *rconn UNUSED, void *opp_)
{
    const struct ofp_phy_port *opp = opp_;
    uint16_t port_no = ntohs(opp->port_no);
    if (sw->capabilities & OFPC_STP && port_no < STP_MAX_PORTS) {
        uint32_t config = ntohl(opp->config);
        uint32_t state = ntohl(opp->state);
        unsigned int *port_state = &sw->port_states[port_no];
        unsigned int new_port_state;

        if (!(config & (OFPPC_NO_STP | OFPPC_PORT_DOWN))
            && !(state & OFPPS_LINK_DOWN))
        {
            switch (state & OFPPS_STP_MASK) {
            case OFPPS_STP_LISTEN:
                new_port_state = P_LISTENING;
                break;
            case OFPPS_STP_LEARN:
                new_port_state = P_LEARNING;
                break;
            case OFPPS_STP_FORWARD:
                new_port_state = P_FORWARDING;
                break;
            case OFPPS_STP_BLOCK:
                new_port_state = P_BLOCKING;
                break;
            default:
                new_port_state = P_DISABLED;
                break;
            }
        } else {
            new_port_state = P_FORWARDING;
        }
        if (*port_state != new_port_state) {
            *port_state = new_port_state;
            schedule_query(sw, 1000);
        }
    }
}

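/* STP gating helpers.  get_port_state() reports P_FORWARDING for ports
 * outside STP's range and for switches without OFPC_STP, so STP never
 * restricts them.  may_learn() permits MAC learning in the LEARNING and
 * FORWARDING states; may_send() permits output only in the FORWARDING state;
 * may_recv() permits receiving only in the FORWARDING state when
 * 'any_actions' is false, and also in the LEARNING state when it is true. */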
static unsigned int
get_port_state(const struct lswitch *sw, uint16_t port_no)
{
    return (port_no >= STP_MAX_PORTS || !(sw->capabilities & OFPC_STP)
            ? P_FORWARDING
            : sw->port_states[port_no]);
}

static bool
may_learn(const struct lswitch *sw, uint16_t port_no)
{
    return get_port_state(sw, port_no) & (P_LEARNING | P_FORWARDING);
}

static bool
may_recv(const struct lswitch *sw, uint16_t port_no, bool any_actions)
{
    unsigned int state = get_port_state(sw, port_no);
    return !(any_actions
             ? state & (P_DISABLED | P_LISTENING | P_BLOCKING)
             : state & (P_DISABLED | P_LISTENING | P_BLOCKING | P_LEARNING));
}

static bool
may_send(const struct lswitch *sw, uint16_t port_no)
{
    return get_port_state(sw, port_no) & P_FORWARDING;
}

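/* Examines one flow from a flow-stats reply (requested by lswitch_run()) and
 * deletes it from the switch if its input port may not receive or any of its
 * output ports may not send under the current STP port states. */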
static void
process_flow_stats(struct lswitch *sw, struct rconn *rconn,
                   const struct ofp_flow_stats *ofs)
{
    const char *end = (char *) ofs + ntohs(ofs->length);
    bool delete = false;

    /* Decide to delete the flow if it matches on an STP-disabled physical
     * port.  But don't delete it if the flow just drops all received packets,
     * because that's a perfectly reasonable thing to do for disabled physical
     * ports. */
    if (!(ofs->match.wildcards & htonl(OFPFW_IN_PORT))) {
        if (!may_recv(sw, ntohs(ofs->match.in_port),
                      end > (char *) ofs->actions)) {
            delete = true;
            sw->n_no_recv++;
        }
    }

    /* Decide to delete the flow if it forwards to an STP-disabled physical
     * port. */
    if (!delete) {
        const struct ofp_action_header *a;
        size_t len;

        for (a = ofs->actions; (char *) a < end; a += len / 8) {
            len = ntohs(a->len);
            if (len > end - (char *) a) {
                VLOG_DBG_RL(&rl, "%012llx: action exceeds available space "
                            "(%zu > %td)",
                            sw->datapath_id, len, end - (char *) a);
                break;
            } else if (len % 8) {
                VLOG_DBG_RL(&rl, "%012llx: action length (%zu) not multiple "
                            "of 8 bytes", sw->datapath_id, len);
                break;
            }

            if (a->type == htons(OFPAT_OUTPUT)) {
                struct ofp_action_output *oao = (struct ofp_action_output *) a;
                if (!may_send(sw, ntohs(oao->port))) {
                    delete = true;
                    sw->n_no_send++;
                    break;
                }
            }
        }
    }

    /* Delete the flow. */
    if (delete) {
        struct ofp_flow_mod *ofm;
        struct ofpbuf *b;

        ofm = make_openflow(offsetof(struct ofp_flow_mod, actions),
                            OFPT_FLOW_MOD, &b);
        ofm->match = ofs->match;
        ofm->command = htons(OFPFC_DELETE_STRICT);
        rconn_send(rconn, b, NULL);
    }
}

static void
process_stats_reply(struct lswitch *sw, struct rconn *rconn, void *osr_)
{
    struct ofp_stats_reply *osr = osr_;
    struct flow_stats_iterator i;
    const struct ofp_flow_stats *fs;

    if (sw->last_query == LLONG_MIN
        || osr->type != htons(OFPST_FLOW)
        || osr->header.xid != sw->query_xid) {
        return;
    }
    for (fs = flow_stats_first(&i, osr); fs; fs = flow_stats_next(&i)) {
        sw->n_flows++;
        process_flow_stats(sw, rconn, fs);
    }
    if (!(osr->flags & htons(OFPSF_REPLY_MORE))) {
        VLOG_DBG("%012llx: Deleted %d of %d received flows to "
                 "implement STP, %d because of no-recv, %d because of "
                 "no-send", sw->datapath_id,
                 sw->n_no_recv + sw->n_no_send, sw->n_flows,
                 sw->n_no_recv, sw->n_no_send);
        sw->last_query = LLONG_MIN;
        sw->last_reply = LLONG_MIN;
    } else {
        sw->last_reply = time_msec();
    }
}