/*
 * Copyright (c) 2008, 2009 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "learning-switch.h"
22 #include <netinet/in.h>
27 #include "mac-learning.h"
29 #include "ofp-print.h"
30 #include "openflow/openflow.h"
31 #include "poll-loop.h"
39 #define THIS_MODULE VLM_learning_switch
/* Per-port STP state, tracked so that flows through a port can be pruned
 * when that port stops learning or forwarding.  Kept as a bitmask so several
 * states can be tested with a single AND (see may_learn(), may_recv(),
 * may_send()).  NOTE(review): enumerator values other than P_FORWARDING
 * reconstructed from usage below -- confirm against upstream source. */
enum port_state {
    P_DISABLED   = 1 << 0,      /* Port is administratively down. */
    P_LISTENING  = 1 << 1,      /* STP listening: no learning, no traffic. */
    P_LEARNING   = 1 << 2,      /* STP learning: learn MACs, no traffic. */
    P_FORWARDING = 1 << 3,      /* Normal operation. */
    P_BLOCKING   = 1 << 4       /* STP blocking: no traffic. */
};
51 /* If nonnegative, the switch sets up flows that expire after the given
52 * number of seconds (or never expire, if the value is OFP_FLOW_PERMANENT).
53 * Otherwise, the switch processes every packet. */
56 unsigned long long int datapath_id
;
57 uint32_t capabilities
;
58 time_t last_features_request
;
59 struct mac_learning
*ml
; /* NULL to act as hub instead of switch. */
61 /* Number of outgoing queued packets on the rconn. */
62 struct rconn_packet_counter
*queued
;
64 /* Spanning tree protocol implementation.
66 * We implement STP states by, whenever a port's STP state changes,
67 * querying all the flows on the switch and then deleting any of them that
68 * are inappropriate for a port's STP state. */
69 long long int next_query
; /* Next time at which to query all flows. */
70 long long int last_query
; /* Last time we sent a query. */
71 long long int last_reply
; /* Last time we received a query reply. */
72 unsigned int port_states
[STP_MAX_PORTS
];
73 uint32_t query_xid
; /* XID used for query. */
74 int n_flows
, n_no_recv
, n_no_send
;
77 /* The log messages here could actually be useful in debugging, so keep the
78 * rate limit relatively high. */
79 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(30, 300);
81 static void queue_tx(struct lswitch
*, struct rconn
*, struct ofpbuf
*);
82 static void send_features_request(struct lswitch
*, struct rconn
*);
83 static void schedule_query(struct lswitch
*, long long int delay
);
84 static bool may_learn(const struct lswitch
*, uint16_t port_no
);
85 static bool may_recv(const struct lswitch
*, uint16_t port_no
,
87 static bool may_send(const struct lswitch
*, uint16_t port_no
);
89 typedef void packet_handler_func(struct lswitch
*, struct rconn
*, void *);
90 static packet_handler_func process_switch_features
;
91 static packet_handler_func process_packet_in
;
92 static packet_handler_func process_echo_request
;
93 static packet_handler_func process_port_status
;
94 static packet_handler_func process_phy_port
;
95 static packet_handler_func process_stats_reply
;
97 /* Creates and returns a new learning switch.
99 * If 'learn_macs' is true, the new switch will learn the ports on which MAC
100 * addresses appear. Otherwise, the new switch will flood all packets.
102 * If 'max_idle' is nonnegative, the new switch will set up flows that expire
103 * after the given number of seconds (or never expire, if 'max_idle' is
104 * OFP_FLOW_PERMANENT). Otherwise, the new switch will process every packet.
106 * 'rconn' is used to send out an OpenFlow features request. */
108 lswitch_create(struct rconn
*rconn
, bool learn_macs
, int max_idle
)
113 sw
= xcalloc(1, sizeof *sw
);
114 sw
->max_idle
= max_idle
;
116 sw
->last_features_request
= time_now() - 1;
117 sw
->ml
= learn_macs
? mac_learning_create() : NULL
;
118 sw
->queued
= rconn_packet_counter_create();
119 sw
->next_query
= LLONG_MIN
;
120 sw
->last_query
= LLONG_MIN
;
121 sw
->last_reply
= LLONG_MIN
;
122 for (i
= 0; i
< STP_MAX_PORTS
; i
++) {
123 sw
->port_states
[i
] = P_DISABLED
;
125 send_features_request(sw
, rconn
);
131 lswitch_destroy(struct lswitch
*sw
)
134 mac_learning_destroy(sw
->ml
);
135 rconn_packet_counter_destroy(sw
->queued
);
140 /* Takes care of necessary 'sw' activity, except for receiving packets (which
141 * the caller must do). */
143 lswitch_run(struct lswitch
*sw
, struct rconn
*rconn
)
145 long long int now
= time_msec();
148 mac_learning_run(sw
->ml
, NULL
);
151 /* If we're waiting for more replies, keeping waiting for up to 10 s. */
152 if (sw
->last_reply
!= LLONG_MIN
) {
153 if (now
- sw
->last_reply
> 10000) {
154 VLOG_ERR_RL(&rl
, "%012llx: No more flow stat replies last 10 s",
156 sw
->last_reply
= LLONG_MIN
;
157 sw
->last_query
= LLONG_MIN
;
158 schedule_query(sw
, 0);
164 /* If we're waiting for any reply at all, keep waiting for up to 10 s. */
165 if (sw
->last_query
!= LLONG_MIN
) {
166 if (now
- sw
->last_query
> 10000) {
167 VLOG_ERR_RL(&rl
, "%012llx: No flow stat replies in last 10 s",
169 sw
->last_query
= LLONG_MIN
;
170 schedule_query(sw
, 0);
176 /* If it's time to send another query, do so. */
177 if (sw
->next_query
!= LLONG_MIN
&& now
>= sw
->next_query
) {
178 sw
->next_query
= LLONG_MIN
;
179 if (!rconn_is_connected(rconn
)) {
180 schedule_query(sw
, 1000);
182 struct ofp_stats_request
*osr
;
183 struct ofp_flow_stats_request
*ofsr
;
187 VLOG_DBG("%012llx: Sending flow stats request to implement STP",
190 sw
->last_query
= now
;
191 sw
->query_xid
= random_uint32();
195 osr
= make_openflow_xid(sizeof *osr
+ sizeof *ofsr
,
196 OFPT_STATS_REQUEST
, sw
->query_xid
, &b
);
197 osr
->type
= htons(OFPST_FLOW
);
198 osr
->flags
= htons(0);
199 ofsr
= (struct ofp_flow_stats_request
*) osr
->body
;
200 ofsr
->match
.wildcards
= htonl(OFPFW_ALL
);
201 ofsr
->table_id
= 0xff;
202 ofsr
->out_port
= htons(OFPP_NONE
);
204 error
= rconn_send(rconn
, b
, NULL
);
206 VLOG_WARN_RL(&rl
, "%012llx: sending flow stats request "
207 "failed: %s", sw
->datapath_id
, strerror(error
));
209 schedule_query(sw
, 1000);
/* Arranges for the poll loop to wake up when 10 s will have elapsed since
 * 'started' (a time_msec() value), or immediately if that deadline has
 * already passed. */
static void
wait_timeout(long long int started)
{
    long long int now = time_msec();
    long long int timeout = 10000 - (now - started);
    if (timeout <= 0) {
        poll_immediate_wake();
    } else {
        poll_timer_wait(timeout);
    }
}
228 lswitch_wait(struct lswitch
*sw
)
231 mac_learning_wait(sw
->ml
);
234 if (sw
->last_reply
!= LLONG_MIN
) {
235 wait_timeout(sw
->last_reply
);
236 } else if (sw
->last_query
!= LLONG_MIN
) {
237 wait_timeout(sw
->last_query
);
241 /* Processes 'msg', which should be an OpenFlow received on 'rconn', according
242 * to the learning switch state in 'sw'. The most likely result of processing
243 * is that flow-setup and packet-out OpenFlow messages will be sent out on
246 lswitch_process_packet(struct lswitch
*sw
, struct rconn
*rconn
,
247 const struct ofpbuf
*msg
)
252 packet_handler_func
*handler
;
254 static const struct processor processors
[] = {
257 sizeof(struct ofp_header
),
262 sizeof(struct ofp_switch_features
),
263 process_switch_features
267 offsetof(struct ofp_packet_in
, data
),
272 sizeof(struct ofp_port_status
),
277 offsetof(struct ofp_stats_reply
, body
),
282 sizeof(struct ofp_flow_expired
),
286 const size_t n_processors
= ARRAY_SIZE(processors
);
287 const struct processor
*p
;
288 struct ofp_header
*oh
;
291 if (sw
->datapath_id
== 0
292 && oh
->type
!= OFPT_ECHO_REQUEST
293 && oh
->type
!= OFPT_FEATURES_REPLY
) {
294 send_features_request(sw
, rconn
);
298 for (p
= processors
; p
< &processors
[n_processors
]; p
++) {
299 if (oh
->type
== p
->type
) {
300 if (msg
->size
< p
->min_size
) {
301 VLOG_WARN_RL(&rl
, "%012llx: %s: too short (%zu bytes) for "
302 "type %"PRIu8
" (min %zu)", sw
->datapath_id
,
303 rconn_get_name(rconn
), msg
->size
, oh
->type
,
308 (p
->handler
)(sw
, rconn
, msg
->data
);
313 if (VLOG_IS_DBG_ENABLED()) {
314 char *p
= ofp_to_string(msg
->data
, msg
->size
, 2);
315 VLOG_DBG_RL(&rl
, "%012llx: OpenFlow packet ignored: %s",
322 send_features_request(struct lswitch
*sw
, struct rconn
*rconn
)
324 time_t now
= time_now();
325 if (now
>= sw
->last_features_request
+ 1) {
327 struct ofp_switch_config
*osc
;
329 /* Send OFPT_FEATURES_REQUEST. */
330 make_openflow(sizeof(struct ofp_header
), OFPT_FEATURES_REQUEST
, &b
);
331 queue_tx(sw
, rconn
, b
);
333 /* Send OFPT_SET_CONFIG. */
334 osc
= make_openflow(sizeof *osc
, OFPT_SET_CONFIG
, &b
);
335 osc
->flags
= htons(OFPC_SEND_FLOW_EXP
);
336 osc
->miss_send_len
= htons(OFP_DEFAULT_MISS_SEND_LEN
);
337 queue_tx(sw
, rconn
, b
);
339 sw
->last_features_request
= now
;
344 queue_tx(struct lswitch
*sw
, struct rconn
*rconn
, struct ofpbuf
*b
)
346 int retval
= rconn_send_with_limit(rconn
, b
, sw
->queued
, 10);
347 if (retval
&& retval
!= ENOTCONN
) {
348 if (retval
== EAGAIN
) {
349 VLOG_INFO_RL(&rl
, "%012llx: %s: tx queue overflow",
350 sw
->datapath_id
, rconn_get_name(rconn
));
352 VLOG_WARN_RL(&rl
, "%012llx: %s: send: %s",
353 sw
->datapath_id
, rconn_get_name(rconn
),
360 schedule_query(struct lswitch
*sw
, long long int delay
)
362 long long int now
= time_msec();
363 if (sw
->next_query
== LLONG_MIN
|| sw
->next_query
> now
+ delay
) {
364 sw
->next_query
= now
+ delay
;
369 process_switch_features(struct lswitch
*sw
, struct rconn
*rconn
, void *osf_
)
371 struct ofp_switch_features
*osf
= osf_
;
372 size_t n_ports
= ((ntohs(osf
->header
.length
)
373 - offsetof(struct ofp_switch_features
, ports
))
374 / sizeof *osf
->ports
);
377 sw
->datapath_id
= ntohll(osf
->datapath_id
);
378 sw
->capabilities
= ntohl(osf
->capabilities
);
379 for (i
= 0; i
< n_ports
; i
++) {
380 process_phy_port(sw
, rconn
, &osf
->ports
[i
]);
382 if (sw
->capabilities
& OFPC_STP
) {
383 schedule_query(sw
, 1000);
388 process_packet_in(struct lswitch
*sw
, struct rconn
*rconn
, void *opi_
)
390 struct ofp_packet_in
*opi
= opi_
;
391 uint16_t in_port
= ntohs(opi
->in_port
);
392 uint16_t out_port
= OFPP_FLOOD
;
394 size_t pkt_ofs
, pkt_len
;
398 /* Extract flow data from 'opi' into 'flow'. */
399 pkt_ofs
= offsetof(struct ofp_packet_in
, data
);
400 pkt_len
= ntohs(opi
->header
.length
) - pkt_ofs
;
401 pkt
.data
= opi
->data
;
403 flow_extract(&pkt
, in_port
, &flow
);
405 if (may_learn(sw
, in_port
) && sw
->ml
) {
406 if (mac_learning_learn(sw
->ml
, flow
.dl_src
, 0, in_port
)) {
407 VLOG_DBG_RL(&rl
, "%012llx: learned that "ETH_ADDR_FMT
" is on "
408 "port %"PRIu16
, sw
->datapath_id
,
409 ETH_ADDR_ARGS(flow
.dl_src
), in_port
);
413 if (eth_addr_is_reserved(flow
.dl_src
)) {
417 if (!may_recv(sw
, in_port
, false)) {
418 /* STP prevents receiving anything on this port. */
423 int learned_port
= mac_learning_lookup(sw
->ml
, flow
.dl_dst
, 0);
424 if (learned_port
>= 0 && may_send(sw
, learned_port
)) {
425 out_port
= learned_port
;
429 if (in_port
== out_port
) {
430 /* Don't send out packets on their input ports. */
432 } else if (sw
->max_idle
>= 0 && (!sw
->ml
|| out_port
!= OFPP_FLOOD
)) {
433 /* The output port is known, or we always flood everything, so add a
435 queue_tx(sw
, rconn
, make_add_simple_flow(&flow
, ntohl(opi
->buffer_id
),
436 out_port
, sw
->max_idle
));
438 /* If the switch didn't buffer the packet, we need to send a copy. */
439 if (ntohl(opi
->buffer_id
) == UINT32_MAX
) {
441 make_unbuffered_packet_out(&pkt
, in_port
, out_port
));
444 /* We don't know that MAC, or we don't set up flows. Send along the
445 * packet without setting up a flow. */
447 if (ntohl(opi
->buffer_id
) == UINT32_MAX
) {
448 b
= make_unbuffered_packet_out(&pkt
, in_port
, out_port
);
450 b
= make_buffered_packet_out(ntohl(opi
->buffer_id
),
453 queue_tx(sw
, rconn
, b
);
458 if (sw
->max_idle
>= 0) {
459 /* Set up a flow to drop packets. */
460 queue_tx(sw
, rconn
, make_add_flow(&flow
, ntohl(opi
->buffer_id
),
463 /* Just drop the packet, since we don't set up flows at all.
464 * XXX we should send a packet_out with no actions if buffer_id !=
465 * UINT32_MAX, to avoid clogging the kernel buffers. */
/* OFPT_ECHO_REQUEST handler: queues the matching echo reply. */
static void
process_echo_request(struct lswitch *sw, struct rconn *rconn, void *rq_)
{
    struct ofp_header *rq = rq_;
    queue_tx(sw, rconn, make_echo_reply(rq));
}
478 process_port_status(struct lswitch
*sw
, struct rconn
*rconn
, void *ops_
)
480 struct ofp_port_status
*ops
= ops_
;
481 process_phy_port(sw
, rconn
, &ops
->desc
);
485 process_phy_port(struct lswitch
*sw
, struct rconn
*rconn UNUSED
, void *opp_
)
487 const struct ofp_phy_port
*opp
= opp_
;
488 uint16_t port_no
= ntohs(opp
->port_no
);
489 if (sw
->capabilities
& OFPC_STP
&& port_no
< STP_MAX_PORTS
) {
490 uint32_t config
= ntohl(opp
->config
);
491 uint32_t state
= ntohl(opp
->state
);
492 unsigned int *port_state
= &sw
->port_states
[port_no
];
493 unsigned int new_port_state
;
495 if (!(config
& (OFPPC_NO_STP
| OFPPC_PORT_DOWN
))
496 && !(state
& OFPPS_LINK_DOWN
))
498 switch (state
& OFPPS_STP_MASK
) {
499 case OFPPS_STP_LISTEN
:
500 new_port_state
= P_LISTENING
;
502 case OFPPS_STP_LEARN
:
503 new_port_state
= P_LEARNING
;
505 case OFPPS_STP_FORWARD
:
506 new_port_state
= P_FORWARDING
;
508 case OFPPS_STP_BLOCK
:
509 new_port_state
= P_BLOCKING
;
512 new_port_state
= P_DISABLED
;
516 new_port_state
= P_FORWARDING
;
518 if (*port_state
!= new_port_state
) {
519 *port_state
= new_port_state
;
520 schedule_query(sw
, 1000);
526 get_port_state(const struct lswitch
*sw
, uint16_t port_no
)
528 return (port_no
>= STP_MAX_PORTS
|| !(sw
->capabilities
& OFPC_STP
)
530 : sw
->port_states
[port_no
]);
534 may_learn(const struct lswitch
*sw
, uint16_t port_no
)
536 return get_port_state(sw
, port_no
) & (P_LEARNING
| P_FORWARDING
);
540 may_recv(const struct lswitch
*sw
, uint16_t port_no
, bool any_actions
)
542 unsigned int state
= get_port_state(sw
, port_no
);
544 ? state
& (P_DISABLED
| P_LISTENING
| P_BLOCKING
)
545 : state
& (P_DISABLED
| P_LISTENING
| P_BLOCKING
| P_LEARNING
));
549 may_send(const struct lswitch
*sw
, uint16_t port_no
)
551 return get_port_state(sw
, port_no
) & P_FORWARDING
;
555 process_flow_stats(struct lswitch
*sw
, struct rconn
*rconn
,
556 const struct ofp_flow_stats
*ofs
)
558 const char *end
= (char *) ofs
+ ntohs(ofs
->length
);
561 /* Decide to delete the flow if it matches on an STP-disabled physical
562 * port. But don't delete it if the flow just drops all received packets,
563 * because that's a perfectly reasonable thing to do for disabled physical
565 if (!(ofs
->match
.wildcards
& htonl(OFPFW_IN_PORT
))) {
566 if (!may_recv(sw
, ntohs(ofs
->match
.in_port
),
567 end
> (char *) ofs
->actions
)) {
573 /* Decide to delete the flow if it forwards to an STP-disabled physical
576 const struct ofp_action_header
*a
;
579 for (a
= ofs
->actions
; (char *) a
< end
; a
+= len
/ 8) {
581 if (len
> end
- (char *) a
) {
582 VLOG_DBG_RL(&rl
, "%012llx: action exceeds available space "
584 sw
->datapath_id
, len
, end
- (char *) a
);
586 } else if (len
% 8) {
587 VLOG_DBG_RL(&rl
, "%012llx: action length (%zu) not multiple "
588 "of 8 bytes", sw
->datapath_id
, len
);
592 if (a
->type
== htons(OFPAT_OUTPUT
)) {
593 struct ofp_action_output
*oao
= (struct ofp_action_output
*) a
;
594 if (!may_send(sw
, ntohs(oao
->port
))) {
603 /* Delete the flow. */
605 struct ofp_flow_mod
*ofm
;
608 ofm
= make_openflow(offsetof(struct ofp_flow_mod
, actions
),
610 ofm
->match
= ofs
->match
;
611 ofm
->command
= OFPFC_DELETE_STRICT
;
612 rconn_send(rconn
, b
, NULL
);
617 process_stats_reply(struct lswitch
*sw
, struct rconn
*rconn
, void *osr_
)
619 struct ofp_stats_reply
*osr
= osr_
;
620 struct flow_stats_iterator i
;
621 const struct ofp_flow_stats
*fs
;
623 if (sw
->last_query
== LLONG_MIN
624 || osr
->type
!= htons(OFPST_FLOW
)
625 || osr
->header
.xid
!= sw
->query_xid
) {
628 for (fs
= flow_stats_first(&i
, osr
); fs
; fs
= flow_stats_next(&i
)) {
630 process_flow_stats(sw
, rconn
, fs
);
632 if (!(osr
->flags
& htons(OFPSF_REPLY_MORE
))) {
633 VLOG_DBG("%012llx: Deleted %d of %d received flows to "
634 "implement STP, %d because of no-recv, %d because of "
635 "no-send", sw
->datapath_id
,
636 sw
->n_no_recv
+ sw
->n_no_send
, sw
->n_flows
,
637 sw
->n_no_recv
, sw
->n_no_send
);
638 sw
->last_query
= LLONG_MIN
;
639 sw
->last_reply
= LLONG_MIN
;
641 sw
->last_reply
= time_msec();