1 /*
2 * Copyright (c) 2009, 2010 Nicira Networks.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "ofproto.h"
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <net/if.h>
22 #include <netinet/in.h>
23 #include <stdbool.h>
24 #include <stdlib.h>
25 #include "classifier.h"
26 #include "coverage.h"
27 #include "discovery.h"
28 #include "dpif.h"
29 #include "dynamic-string.h"
30 #include "fail-open.h"
31 #include "in-band.h"
32 #include "mac-learning.h"
33 #include "netdev.h"
34 #include "netflow.h"
35 #include "odp-util.h"
36 #include "ofp-print.h"
37 #include "ofproto-sflow.h"
38 #include "ofpbuf.h"
39 #include "openflow/nicira-ext.h"
40 #include "openflow/openflow.h"
41 #include "openvswitch/datapath-protocol.h"
42 #include "packets.h"
43 #include "pinsched.h"
44 #include "pktbuf.h"
45 #include "poll-loop.h"
46 #include "port-array.h"
47 #include "rconn.h"
48 #include "shash.h"
49 #include "status.h"
50 #include "stp.h"
51 #include "stream-ssl.h"
52 #include "svec.h"
53 #include "tag.h"
54 #include "timeval.h"
55 #include "unixctl.h"
56 #include "vconn.h"
57 #include "xtoxll.h"
58
59 #define THIS_MODULE VLM_ofproto
60 #include "vlog.h"
61
62 #include "sflow_api.h"
63
64 enum {
65 TABLEID_HASH = 0,
66 TABLEID_CLASSIFIER = 1
67 };
68
69 struct ofport {
70 struct netdev *netdev;
71 struct ofp_phy_port opp; /* In host byte order. */
72 };
73
74 static void ofport_free(struct ofport *);
75 static void hton_ofp_phy_port(struct ofp_phy_port *);
76
77 static int xlate_actions(const union ofp_action *in, size_t n_in,
78 const flow_t *flow, struct ofproto *ofproto,
79 const struct ofpbuf *packet,
80 struct odp_actions *out, tag_type *tags,
81 bool *may_set_up_flow, uint16_t *nf_output_iface);
82
83 struct rule {
84 struct cls_rule cr;
85
86 uint64_t flow_cookie; /* Controller-issued identifier.
87 (Kept in network byte order.) */
88 uint16_t idle_timeout; /* In seconds from time of last use. */
89 uint16_t hard_timeout; /* In seconds from time of creation. */
90 bool send_flow_removed; /* Send a flow removed message? */
91 long long int used; /* Last-used time (0 if never used). */
92 long long int created; /* Creation time. */
93 uint64_t packet_count; /* Number of packets received. */
94 uint64_t byte_count; /* Number of bytes received. */
95 uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
96 tag_type tags; /* Tags (set only by hooks). */
97 struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
98
99 /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
100 * exact-match rule (having cr.wc.wildcards of 0) generated from the
101 * wildcard rule 'super'. In this case, 'list' is an element of the
102 * super-rule's list.
103 *
104 * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
105 * a list of subrules. A super-rule with no wildcards (where
106 * cr.wc.wildcards is 0) will never have any subrules. */
107 struct rule *super;
108 struct list list;
109
110 /* OpenFlow actions.
111 *
112 * 'n_actions' is the number of elements in the 'actions' array. A single
113 * action may take up more than one element's worth of space.
114 *
115 * A subrule has no actions (it uses the super-rule's actions). */
116 int n_actions;
117 union ofp_action *actions;
118
119 /* Datapath actions.
120 *
121 * A super-rule with wildcard fields never has ODP actions (since the
122 * datapath only supports exact-match flows). */
123 bool installed; /* Installed in datapath? */
124 bool may_install; /* True ordinarily; false if actions must
125 * be reassessed for every packet. */
126 int n_odp_actions;
127 union odp_action *odp_actions;
128 };
129
130 static inline bool
131 rule_is_hidden(const struct rule *rule)
132 {
133 /* Subrules are merely an implementation detail, so hide them from the
134 * controller. */
135 if (rule->super != NULL) {
136 return true;
137 }
138
139 /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
140 * (e.g. by in-band control) and are intentionally hidden from the
141 * controller. */
142 if (rule->cr.priority > UINT16_MAX) {
143 return true;
144 }
145
146 return false;
147 }
148
149 static struct rule *rule_create(struct ofproto *, struct rule *super,
150 const union ofp_action *, size_t n_actions,
151 uint16_t idle_timeout, uint16_t hard_timeout,
152 uint64_t flow_cookie, bool send_flow_removed);
153 static void rule_free(struct rule *);
154 static void rule_destroy(struct ofproto *, struct rule *);
155 static struct rule *rule_from_cls_rule(const struct cls_rule *);
156 static void rule_insert(struct ofproto *, struct rule *,
157 struct ofpbuf *packet, uint16_t in_port);
158 static void rule_remove(struct ofproto *, struct rule *);
159 static bool rule_make_actions(struct ofproto *, struct rule *,
160 const struct ofpbuf *packet);
161 static void rule_install(struct ofproto *, struct rule *,
162 struct rule *displaced_rule);
163 static void rule_uninstall(struct ofproto *, struct rule *);
164 static void rule_post_uninstall(struct ofproto *, struct rule *);
165 static void send_flow_removed(struct ofproto *p, struct rule *rule,
166 long long int now, uint8_t reason);
167
168 struct ofconn {
169 struct list node;
170 struct rconn *rconn;
171 struct pktbuf *pktbuf;
172 int miss_send_len;
173
174 struct rconn_packet_counter *packet_in_counter;
175
176 /* Number of OpenFlow messages queued as replies to OpenFlow requests, and
177 * the maximum number before we stop reading OpenFlow requests. */
178 #define OFCONN_REPLY_MAX 100
179 struct rconn_packet_counter *reply_counter;
180 };
181
182 static struct ofconn *ofconn_create(struct ofproto *, struct rconn *);
183 static void ofconn_destroy(struct ofconn *);
184 static void ofconn_run(struct ofconn *, struct ofproto *);
185 static void ofconn_wait(struct ofconn *);
186 static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
187 struct rconn_packet_counter *counter);
188
189 struct ofproto {
190 /* Settings. */
191 uint64_t datapath_id; /* Datapath ID. */
192 uint64_t fallback_dpid; /* Datapath ID if no better choice found. */
193 char *mfr_desc; /* Manufacturer. */
194 char *hw_desc; /* Hardware. */
195 char *sw_desc; /* Software version. */
196 char *serial_desc; /* Serial number. */
197 char *dp_desc; /* Datapath description. */
198
199 /* Datapath. */
200 struct dpif *dpif;
201 struct netdev_monitor *netdev_monitor;
202 struct port_array ports; /* Index is ODP port nr; ofport->opp.port_no is
203 * OFP port nr. */
204 struct shash port_by_name;
205 uint32_t max_ports;
206
207 /* Configuration. */
208 struct switch_status *switch_status;
209 struct status_category *ss_cat;
210 struct in_band *in_band;
211 struct discovery *discovery;
212 struct fail_open *fail_open;
213 struct pinsched *miss_sched, *action_sched;
214 struct netflow *netflow;
215 struct ofproto_sflow *sflow;
216
217 /* Flow table. */
218 struct classifier cls;
219 bool need_revalidate;
220 long long int next_expiration;
221 struct tag_set revalidate_set;
222 bool tun_id_from_cookie;
223
224 /* OpenFlow connections. */
225 struct list all_conns;
226 struct ofconn *controller;
227 struct pvconn **listeners;
228 size_t n_listeners;
229 struct pvconn **snoops;
230 size_t n_snoops;
231
232 /* Hooks for ovs-vswitchd. */
233 const struct ofhooks *ofhooks;
234 void *aux;
235
236 /* Used by default ofhooks. */
237 struct mac_learning *ml;
238 };
239
240 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
241
242 static const struct ofhooks default_ofhooks;
243
244 static uint64_t pick_datapath_id(const struct ofproto *);
245 static uint64_t pick_fallback_dpid(void);
246 static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
247 static void send_packet_in_action(struct ofpbuf *, void *ofproto);
248 static void update_used(struct ofproto *);
249 static void update_stats(struct ofproto *, struct rule *,
250 const struct odp_flow_stats *);
251 static void expire_rule(struct cls_rule *, void *ofproto);
252 static void active_timeout(struct ofproto *ofproto, struct rule *rule);
253 static bool revalidate_rule(struct ofproto *p, struct rule *rule);
254 static void revalidate_cb(struct cls_rule *rule_, void *p_);
255
256 static void handle_odp_msg(struct ofproto *, struct ofpbuf *);
257
258 static void handle_openflow(struct ofconn *, struct ofproto *,
259 struct ofpbuf *);
260
261 static void refresh_port_groups(struct ofproto *);
262
263 static void update_port(struct ofproto *, const char *devname);
264 static int init_ports(struct ofproto *);
265 static void reinit_ports(struct ofproto *);
266
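/* Opens the datapath named 'datapath' (of type 'datapath_type'), initializes
 * an ofproto to manage it with default settings, and stores a pointer to the
 * new ofproto in '*ofprotop'.  Returns 0 if successful, otherwise a positive
 * errno value.
 *
 * If 'ofhooks' is nonnull it overrides the default hooks, with 'aux' passed
 * to each callback; otherwise the default hooks, which implement simple MAC
 * learning, are used. */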
267 int
268 ofproto_create(const char *datapath, const char *datapath_type,
269 const struct ofhooks *ofhooks, void *aux,
270 struct ofproto **ofprotop)
271 {
272 struct odp_stats stats;
273 struct ofproto *p;
274 struct dpif *dpif;
275 int error;
276
277 *ofprotop = NULL;
278
279 /* Connect to datapath and start listening for messages. */
280 error = dpif_open(datapath, datapath_type, &dpif);
281 if (error) {
282 VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
283 return error;
284 }
285 error = dpif_get_dp_stats(dpif, &stats);
286 if (error) {
287 VLOG_ERR("failed to obtain stats for datapath %s: %s",
288 datapath, strerror(error));
289 dpif_close(dpif);
290 return error;
291 }
292 error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
293 if (error) {
294 VLOG_ERR("failed to listen on datapath %s: %s",
295 datapath, strerror(error));
296 dpif_close(dpif);
297 return error;
298 }
299 dpif_flow_flush(dpif);
300 dpif_recv_purge(dpif);
301
302 /* Initialize settings. */
303 p = xzalloc(sizeof *p);
304 p->fallback_dpid = pick_fallback_dpid();
305 p->datapath_id = p->fallback_dpid;
306 p->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
307 p->hw_desc = xstrdup(DEFAULT_HW_DESC);
308 p->sw_desc = xstrdup(DEFAULT_SW_DESC);
309 p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
310 p->dp_desc = xstrdup(DEFAULT_DP_DESC);
311
312 /* Initialize datapath. */
313 p->dpif = dpif;
314 p->netdev_monitor = netdev_monitor_create();
315 port_array_init(&p->ports);
316 shash_init(&p->port_by_name);
317 p->max_ports = stats.max_ports;
318
319 /* Initialize submodules. */
320 p->switch_status = switch_status_create(p);
321 p->in_band = NULL;
322 p->discovery = NULL;
323 p->fail_open = NULL;
324 p->miss_sched = p->action_sched = NULL;
325 p->netflow = NULL;
326 p->sflow = NULL;
327
328 /* Initialize flow table. */
329 classifier_init(&p->cls);
330 p->need_revalidate = false;
331 p->next_expiration = time_msec() + 1000;
332 tag_set_init(&p->revalidate_set);
333
334 /* Initialize OpenFlow connections. */
335 list_init(&p->all_conns);
336 p->controller = ofconn_create(p, rconn_create(5, 8));
337 p->controller->pktbuf = pktbuf_create();
338 p->controller->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
339 p->listeners = NULL;
340 p->n_listeners = 0;
341 p->snoops = NULL;
342 p->n_snoops = 0;
343
344 /* Initialize hooks. */
345 if (ofhooks) {
346 p->ofhooks = ofhooks;
347 p->aux = aux;
348 p->ml = NULL;
349 } else {
350 p->ofhooks = &default_ofhooks;
351 p->aux = p;
352 p->ml = mac_learning_create();
353 }
354
355 /* Register switch status category. */
356 p->ss_cat = switch_status_register(p->switch_status, "remote",
357 rconn_status_cb, p->controller->rconn);
358
359 /* Pick final datapath ID. */
360 p->datapath_id = pick_datapath_id(p);
361 VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);
362
363 *ofprotop = p;
364 return 0;
365 }
366
367 void
368 ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
369 {
370 uint64_t old_dpid = p->datapath_id;
371 p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
372 if (p->datapath_id != old_dpid) {
373 VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);
374 rconn_reconnect(p->controller->rconn);
375 }
376 }
377
378 void
379 ofproto_set_probe_interval(struct ofproto *p, int probe_interval)
380 {
381 probe_interval = probe_interval ? MAX(probe_interval, 5) : 0;
382 rconn_set_probe_interval(p->controller->rconn, probe_interval);
383 if (p->fail_open) {
384 int trigger_duration = probe_interval ? probe_interval * 3 : 15;
385 fail_open_set_trigger_duration(p->fail_open, trigger_duration);
386 }
387 }
388
389 void
390 ofproto_set_max_backoff(struct ofproto *p, int max_backoff)
391 {
392 rconn_set_max_backoff(p->controller->rconn, max_backoff);
393 }
394
395 void
396 ofproto_set_desc(struct ofproto *p,
397 const char *mfr_desc, const char *hw_desc,
398 const char *sw_desc, const char *serial_desc,
399 const char *dp_desc)
400 {
401 struct ofp_desc_stats *ods;
402
403 if (mfr_desc) {
404 if (strlen(mfr_desc) >= sizeof ods->mfr_desc) {
405 VLOG_WARN("truncating mfr_desc, must be less than %zu characters",
406 sizeof ods->mfr_desc);
407 }
408 free(p->mfr_desc);
409 p->mfr_desc = xstrdup(mfr_desc);
410 }
411 if (hw_desc) {
412 if (strlen(hw_desc) >= sizeof ods->hw_desc) {
413 VLOG_WARN("truncating hw_desc, must be less than %zu characters",
414 sizeof ods->hw_desc);
415 }
416 free(p->hw_desc);
417 p->hw_desc = xstrdup(hw_desc);
418 }
419 if (sw_desc) {
420 if (strlen(sw_desc) >= sizeof ods->sw_desc) {
421 VLOG_WARN("truncating sw_desc, must be less than %zu characters",
422 sizeof ods->sw_desc);
423 }
424 free(p->sw_desc);
425 p->sw_desc = xstrdup(sw_desc);
426 }
427 if (serial_desc) {
428 if (strlen(serial_desc) >= sizeof ods->serial_num) {
429 VLOG_WARN("truncating serial_desc, must be less than %zu "
430 "characters",
431 sizeof ods->serial_num);
432 }
433 free(p->serial_desc);
434 p->serial_desc = xstrdup(serial_desc);
435 }
436 if (dp_desc) {
437 if (strlen(dp_desc) >= sizeof ods->dp_desc) {
438 VLOG_WARN("truncating dp_desc, must be less than %zu characters",
439 sizeof ods->dp_desc);
440 }
441 free(p->dp_desc);
442 p->dp_desc = xstrdup(dp_desc);
443 }
444 }
445
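/* Enables in-band control on 'p' if 'in_band' is true, otherwise disables
 * it.  Disabling in-band control also disables controller discovery (which
 * requires in-band control), drops the flows that the in-band module set up,
 * and forces the controller connection to reconnect.  Returns 0 if
 * successful, otherwise a positive errno value. */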
446 int
447 ofproto_set_in_band(struct ofproto *p, bool in_band)
448 {
449 if (in_band != (p->in_band != NULL)) {
450 if (in_band) {
451 return in_band_create(p, p->dpif, p->switch_status,
452 p->controller->rconn, &p->in_band);
453 } else {
454 ofproto_set_discovery(p, false, NULL, true);
455 in_band_destroy(p->in_band);
456 p->in_band = NULL;
457 }
458 rconn_reconnect(p->controller->rconn);
459 }
460 return 0;
461 }
462
463 int
464 ofproto_set_discovery(struct ofproto *p, bool discovery,
465 const char *re, bool update_resolv_conf)
466 {
467 if (discovery != (p->discovery != NULL)) {
468 if (discovery) {
469 int error = ofproto_set_in_band(p, true);
470 if (error) {
471 return error;
472 }
473 error = discovery_create(re, update_resolv_conf,
474 p->dpif, p->switch_status,
475 &p->discovery);
476 if (error) {
477 return error;
478 }
479 } else {
480 discovery_destroy(p->discovery);
481 p->discovery = NULL;
482 }
483 rconn_disconnect(p->controller->rconn);
484 } else if (discovery) {
485 discovery_set_update_resolv_conf(p->discovery, update_resolv_conf);
486 return discovery_set_accept_controller_re(p->discovery, re);
487 }
488 return 0;
489 }
490
491 int
492 ofproto_set_controller(struct ofproto *ofproto, const char *controller)
493 {
494 if (ofproto->discovery) {
495 return EINVAL;
496 } else if (controller) {
497 if (strcmp(rconn_get_name(ofproto->controller->rconn), controller)) {
498 return rconn_connect(ofproto->controller->rconn, controller);
499 } else {
500 return 0;
501 }
502 } else {
503 rconn_disconnect(ofproto->controller->rconn);
504 return 0;
505 }
506 }
507
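/* Closes each of the '*n_pvconnsp' pvconns in '*pvconnsp', then replaces
 * them with pvconns listening on each name in 'svec'.  On return,
 * '*pvconnsp' and '*n_pvconnsp' describe the pvconns that were successfully
 * opened.  Returns 0 if every name could be opened, otherwise the first
 * error encountered. */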
508 static int
509 set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
510 const struct svec *svec)
511 {
512 struct pvconn **pvconns = *pvconnsp;
513 size_t n_pvconns = *n_pvconnsp;
514 int retval = 0;
515 size_t i;
516
517 for (i = 0; i < n_pvconns; i++) {
518 pvconn_close(pvconns[i]);
519 }
520 free(pvconns);
521
522 pvconns = xmalloc(svec->n * sizeof *pvconns);
523 n_pvconns = 0;
524 for (i = 0; i < svec->n; i++) {
525 const char *name = svec->names[i];
526 struct pvconn *pvconn;
527 int error;
528
529 error = pvconn_open(name, &pvconn);
530 if (!error) {
531 pvconns[n_pvconns++] = pvconn;
532 } else {
533 VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
534 if (!retval) {
535 retval = error;
536 }
537 }
538 }
539
540 *pvconnsp = pvconns;
541 *n_pvconnsp = n_pvconns;
542
543 return retval;
544 }
545
546 int
547 ofproto_set_listeners(struct ofproto *ofproto, const struct svec *listeners)
548 {
549 return set_pvconns(&ofproto->listeners, &ofproto->n_listeners, listeners);
550 }
551
552 int
553 ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
554 {
555 return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
556 }
557
558 int
559 ofproto_set_netflow(struct ofproto *ofproto,
560 const struct netflow_options *nf_options)
561 {
562 if (nf_options && nf_options->collectors.n) {
563 if (!ofproto->netflow) {
564 ofproto->netflow = netflow_create();
565 }
566 return netflow_set_options(ofproto->netflow, nf_options);
567 } else {
568 netflow_destroy(ofproto->netflow);
569 ofproto->netflow = NULL;
570 return 0;
571 }
572 }
573
574 void
575 ofproto_set_sflow(struct ofproto *ofproto,
576 const struct ofproto_sflow_options *oso)
577 {
578 struct ofproto_sflow *os = ofproto->sflow;
579 if (oso) {
580 if (!os) {
581 struct ofport *ofport;
582 unsigned int odp_port;
583
584 os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
585 refresh_port_groups(ofproto);
586 PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
587 ofproto_sflow_add_port(os, odp_port,
588 netdev_get_name(ofport->netdev));
589 }
590 }
591 ofproto_sflow_set_options(os, oso);
592 } else {
593 ofproto_sflow_destroy(os);
594 ofproto->sflow = NULL;
595 }
596 }
597
598 void
599 ofproto_set_failure(struct ofproto *ofproto, bool fail_open)
600 {
601 if (fail_open) {
602 struct rconn *rconn = ofproto->controller->rconn;
603 int trigger_duration = rconn_get_probe_interval(rconn) * 3;
604 if (!ofproto->fail_open) {
605 ofproto->fail_open = fail_open_create(ofproto, trigger_duration,
606 ofproto->switch_status,
607 rconn);
608 } else {
609 fail_open_set_trigger_duration(ofproto->fail_open,
610 trigger_duration);
611 }
612 } else {
613 fail_open_destroy(ofproto->fail_open);
614 ofproto->fail_open = NULL;
615 }
616 }
617
618 void
619 ofproto_set_rate_limit(struct ofproto *ofproto,
620 int rate_limit, int burst_limit)
621 {
622 if (rate_limit > 0) {
623 if (!ofproto->miss_sched) {
624 ofproto->miss_sched = pinsched_create(rate_limit, burst_limit,
625 ofproto->switch_status);
626 ofproto->action_sched = pinsched_create(rate_limit, burst_limit,
627 NULL);
628 } else {
629 pinsched_set_limits(ofproto->miss_sched, rate_limit, burst_limit);
630 pinsched_set_limits(ofproto->action_sched,
631 rate_limit, burst_limit);
632 }
633 } else {
634 pinsched_destroy(ofproto->miss_sched);
635 ofproto->miss_sched = NULL;
636 pinsched_destroy(ofproto->action_sched);
637 ofproto->action_sched = NULL;
638 }
639 }
640
641 int
642 ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
643 {
644 /* XXX */
645 if (enable_stp) {
646 VLOG_WARN("STP is not yet implemented");
647 return EINVAL;
648 } else {
649 return 0;
650 }
651 }
652
653 uint64_t
654 ofproto_get_datapath_id(const struct ofproto *ofproto)
655 {
656 return ofproto->datapath_id;
657 }
658
659 int
660 ofproto_get_probe_interval(const struct ofproto *ofproto)
661 {
662 return rconn_get_probe_interval(ofproto->controller->rconn);
663 }
664
665 int
666 ofproto_get_max_backoff(const struct ofproto *ofproto)
667 {
668 return rconn_get_max_backoff(ofproto->controller->rconn);
669 }
670
671 bool
672 ofproto_get_in_band(const struct ofproto *ofproto)
673 {
674 return ofproto->in_band != NULL;
675 }
676
677 bool
678 ofproto_get_discovery(const struct ofproto *ofproto)
679 {
680 return ofproto->discovery != NULL;
681 }
682
683 const char *
684 ofproto_get_controller(const struct ofproto *ofproto)
685 {
686 return rconn_get_name(ofproto->controller->rconn);
687 }
688
689 void
690 ofproto_get_listeners(const struct ofproto *ofproto, struct svec *listeners)
691 {
692 size_t i;
693
694 for (i = 0; i < ofproto->n_listeners; i++) {
695 svec_add(listeners, pvconn_get_name(ofproto->listeners[i]));
696 }
697 }
698
699 void
700 ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
701 {
702 size_t i;
703
704 for (i = 0; i < ofproto->n_snoops; i++) {
705 svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
706 }
707 }
708
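/* Destroys 'p' and all of the resources that it holds: its flow table,
 * OpenFlow connections, listeners, submodules, and datapath handle.  A null
 * 'p' is a no-op. */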
709 void
710 ofproto_destroy(struct ofproto *p)
711 {
712 struct ofconn *ofconn, *next_ofconn;
713 struct ofport *ofport;
714 unsigned int port_no;
715 size_t i;
716
717 if (!p) {
718 return;
719 }
720
721 /* Destroy fail-open and in-band early, since they touch the classifier. */
722 ofproto_set_failure(p, false);
723 ofproto_set_in_band(p, false);
724
725 ofproto_flush_flows(p);
726 classifier_destroy(&p->cls);
727
728 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
729 &p->all_conns) {
730 ofconn_destroy(ofconn);
731 }
732
733 dpif_close(p->dpif);
734 netdev_monitor_destroy(p->netdev_monitor);
735 PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
736 ofport_free(ofport);
737 }
738 shash_destroy(&p->port_by_name);
739
740 switch_status_destroy(p->switch_status);
741 discovery_destroy(p->discovery);
742 pinsched_destroy(p->miss_sched);
743 pinsched_destroy(p->action_sched);
744 netflow_destroy(p->netflow);
745 ofproto_sflow_destroy(p->sflow);
746
747 switch_status_unregister(p->ss_cat);
748
749 for (i = 0; i < p->n_listeners; i++) {
750 pvconn_close(p->listeners[i]);
751 }
752 free(p->listeners);
753
754 for (i = 0; i < p->n_snoops; i++) {
755 pvconn_close(p->snoops[i]);
756 }
757 free(p->snoops);
758
759 mac_learning_destroy(p->ml);
760
761 free(p->mfr_desc);
762 free(p->hw_desc);
763 free(p->sw_desc);
764 free(p->serial_desc);
765 free(p->dp_desc);
766
767 port_array_destroy(&p->ports);
768
769 free(p);
770 }
771
772 int
773 ofproto_run(struct ofproto *p)
774 {
775 int error = ofproto_run1(p);
776 if (!error) {
777 error = ofproto_run2(p, false);
778 }
779 return error;
780 }
781
782 static void
783 process_port_change(struct ofproto *ofproto, int error, char *devname)
784 {
785 if (error == ENOBUFS) {
786 reinit_ports(ofproto);
787 } else if (!error) {
788 update_port(ofproto, devname);
789 free(devname);
790 }
791 }
792
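/* Performs one round of processing for 'p': handles up to 50 queued
 * datapath messages, reacts to datapath and netdev port changes, runs the
 * in-band, discovery, packet-in scheduling, fail-open, NetFlow, and sFlow
 * submodules, services every OpenFlow connection, accepts new listener and
 * snoop connections, and expires flows roughly once per second.  Returns 0
 * normally, or ENODEV if the datapath has been destroyed behind our back. */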
793 int
794 ofproto_run1(struct ofproto *p)
795 {
796 struct ofconn *ofconn, *next_ofconn;
797 char *devname;
798 int error;
799 int i;
800
801 if (shash_is_empty(&p->port_by_name)) {
802 init_ports(p);
803 }
804
805 for (i = 0; i < 50; i++) {
806 struct ofpbuf *buf;
807 int error;
808
809 error = dpif_recv(p->dpif, &buf);
810 if (error) {
811 if (error == ENODEV) {
812 /* Someone destroyed the datapath behind our back. The caller
813 * had better destroy us and give up, because we're just going to
814 * spin from here on out. */
815 static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
816 VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
817 dpif_name(p->dpif));
818 return ENODEV;
819 }
820 break;
821 }
822
823 handle_odp_msg(p, buf);
824 }
825
826 while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
827 process_port_change(p, error, devname);
828 }
829 while ((error = netdev_monitor_poll(p->netdev_monitor,
830 &devname)) != EAGAIN) {
831 process_port_change(p, error, devname);
832 }
833
834 if (p->in_band) {
835 in_band_run(p->in_band);
836 }
837 if (p->discovery) {
838 char *controller_name;
839 if (rconn_is_connectivity_questionable(p->controller->rconn)) {
840 discovery_question_connectivity(p->discovery);
841 }
842 if (discovery_run(p->discovery, &controller_name)) {
843 if (controller_name) {
844 rconn_connect(p->controller->rconn, controller_name);
845 } else {
846 rconn_disconnect(p->controller->rconn);
847 }
848 }
849 }
850 pinsched_run(p->miss_sched, send_packet_in_miss, p);
851 pinsched_run(p->action_sched, send_packet_in_action, p);
852
853 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
854 &p->all_conns) {
855 ofconn_run(ofconn, p);
856 }
857
858 /* Fail-open maintenance. Do this after processing the ofconns since
859 * fail-open checks the status of the controller rconn. */
860 if (p->fail_open) {
861 fail_open_run(p->fail_open);
862 }
863
864 for (i = 0; i < p->n_listeners; i++) {
865 struct vconn *vconn;
866 int retval;
867
868 retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn);
869 if (!retval) {
870 ofconn_create(p, rconn_new_from_vconn("passive", vconn));
871 } else if (retval != EAGAIN) {
872 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
873 }
874 }
875
876 for (i = 0; i < p->n_snoops; i++) {
877 struct vconn *vconn;
878 int retval;
879
880 retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
881 if (!retval) {
882 rconn_add_monitor(p->controller->rconn, vconn);
883 } else if (retval != EAGAIN) {
884 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
885 }
886 }
887
888 if (time_msec() >= p->next_expiration) {
889 COVERAGE_INC(ofproto_expiration);
890 p->next_expiration = time_msec() + 1000;
891 update_used(p);
892
893 classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);
894
895 /* Let the hook know that we're at a stable point: all outstanding data
896 * in existing flows has been accounted to the account_cb. Thus, the
897 * hook can now reasonably do operations that depend on having accurate
898 * flow volume accounting (currently, that's just bond rebalancing). */
899 if (p->ofhooks->account_checkpoint_cb) {
900 p->ofhooks->account_checkpoint_cb(p->aux);
901 }
902 }
903
904 if (p->netflow) {
905 netflow_run(p->netflow);
906 }
907 if (p->sflow) {
908 ofproto_sflow_run(p->sflow);
909 }
910
911 return 0;
912 }
913
914 struct revalidate_cbdata {
915 struct ofproto *ofproto;
916 bool revalidate_all; /* Revalidate all exact-match rules? */
917 bool revalidate_subrules; /* Revalidate all exact-match subrules? */
918 struct tag_set revalidate_set; /* Set of tags to revalidate. */
919 };
920
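/* Revalidates exact-match flows against the current flow table when
 * necessary: all of them if 'revalidate_all' or 'p->need_revalidate' is
 * set, otherwise only the flows whose tags intersect 'p->revalidate_set'.
 * Always returns 0. */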
921 int
922 ofproto_run2(struct ofproto *p, bool revalidate_all)
923 {
924 if (p->need_revalidate || revalidate_all
925 || !tag_set_is_empty(&p->revalidate_set)) {
926 struct revalidate_cbdata cbdata;
927 cbdata.ofproto = p;
928 cbdata.revalidate_all = revalidate_all;
929 cbdata.revalidate_subrules = p->need_revalidate;
930 cbdata.revalidate_set = p->revalidate_set;
931 tag_set_init(&p->revalidate_set);
932 COVERAGE_INC(ofproto_revalidate);
933 classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
934 p->need_revalidate = false;
935 }
936
937 return 0;
938 }
939
940 void
941 ofproto_wait(struct ofproto *p)
942 {
943 struct ofconn *ofconn;
944 size_t i;
945
946 dpif_recv_wait(p->dpif);
947 dpif_port_poll_wait(p->dpif);
948 netdev_monitor_poll_wait(p->netdev_monitor);
949 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
950 ofconn_wait(ofconn);
951 }
952 if (p->in_band) {
953 in_band_wait(p->in_band);
954 }
955 if (p->discovery) {
956 discovery_wait(p->discovery);
957 }
958 if (p->fail_open) {
959 fail_open_wait(p->fail_open);
960 }
961 pinsched_wait(p->miss_sched);
962 pinsched_wait(p->action_sched);
963 if (p->sflow) {
964 ofproto_sflow_wait(p->sflow);
965 }
966 if (!tag_set_is_empty(&p->revalidate_set)) {
967 poll_immediate_wake();
968 }
969 if (p->need_revalidate) {
970 /* Shouldn't happen, but if it does just go around again. */
971 VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait()");
972 poll_immediate_wake();
973 } else if (p->next_expiration != LLONG_MAX) {
974 poll_timer_wait(p->next_expiration - time_msec());
975 }
976 for (i = 0; i < p->n_listeners; i++) {
977 pvconn_wait(p->listeners[i]);
978 }
979 for (i = 0; i < p->n_snoops; i++) {
980 pvconn_wait(p->snoops[i]);
981 }
982 }
983
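/* ofproto_run() and ofproto_wait() above follow the usual Open vSwitch
 * poll-loop pattern: do work, register wakeup events, then block.  As a
 * rough sketch of a hypothetical caller (the datapath name "dp0" and the
 * use of default hooks are illustrative, not requirements), a daemon's main
 * loop might look like:
 *
 *     struct ofproto *ofproto;
 *     int error = ofproto_create("dp0", NULL, NULL, NULL, &ofproto);
 *     if (!error) {
 *         for (;;) {
 *             ofproto_run(ofproto);
 *             ofproto_wait(ofproto);
 *             poll_block();
 *         }
 *     }
 */
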
984 void
985 ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
986 {
987 tag_set_add(&ofproto->revalidate_set, tag);
988 }
989
990 struct tag_set *
991 ofproto_get_revalidate_set(struct ofproto *ofproto)
992 {
993 return &ofproto->revalidate_set;
994 }
995
996 bool
997 ofproto_is_alive(const struct ofproto *p)
998 {
999 return p->discovery || rconn_is_alive(p->controller->rconn);
1000 }
1001
1002 int
1003 ofproto_send_packet(struct ofproto *p, const flow_t *flow,
1004 const union ofp_action *actions, size_t n_actions,
1005 const struct ofpbuf *packet)
1006 {
1007 struct odp_actions odp_actions;
1008 int error;
1009
1010 error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
1011 NULL, NULL, NULL);
1012 if (error) {
1013 return error;
1014 }
1015
1016 /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
1017 * error code? */
1018 dpif_execute(p->dpif, flow->in_port, odp_actions.actions,
1019 odp_actions.n_actions, packet);
1020 return 0;
1021 }
1022
1023 void
1024 ofproto_add_flow(struct ofproto *p,
1025 const flow_t *flow, uint32_t wildcards, unsigned int priority,
1026 const union ofp_action *actions, size_t n_actions,
1027 int idle_timeout)
1028 {
1029 struct rule *rule;
1030 rule = rule_create(p, NULL, actions, n_actions,
1031 idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
1032 0, 0, false);
1033 cls_rule_from_flow(flow, wildcards, priority, &rule->cr);
1034 rule_insert(p, rule, NULL, 0);
1035 }
1036
1037 void
1038 ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
1039 uint32_t wildcards, unsigned int priority)
1040 {
1041 struct rule *rule;
1042
1043 rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
1044 flow, wildcards,
1045 priority));
1046 if (rule) {
1047 rule_remove(ofproto, rule);
1048 }
1049 }
1050
1051 static void
1052 destroy_rule(struct cls_rule *rule_, void *ofproto_)
1053 {
1054 struct rule *rule = rule_from_cls_rule(rule_);
1055 struct ofproto *ofproto = ofproto_;
1056
1057 /* Mark the flow as not installed, even though it might really be
1058 * installed, so that rule_remove() doesn't bother trying to uninstall it.
1059 * There is no point in uninstalling it individually since we are about to
1060 * blow away all the flows with dpif_flow_flush(). */
1061 rule->installed = false;
1062
1063 rule_remove(ofproto, rule);
1064 }
1065
1066 void
1067 ofproto_flush_flows(struct ofproto *ofproto)
1068 {
1069 COVERAGE_INC(ofproto_flush);
1070 classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
1071 dpif_flow_flush(ofproto->dpif);
1072 if (ofproto->in_band) {
1073 in_band_flushed(ofproto->in_band);
1074 }
1075 if (ofproto->fail_open) {
1076 fail_open_flushed(ofproto->fail_open);
1077 }
1078 }
1079 \f
1080 static void
1081 reinit_ports(struct ofproto *p)
1082 {
1083 struct svec devnames;
1084 struct ofport *ofport;
1085 unsigned int port_no;
1086 struct odp_port *odp_ports;
1087 size_t n_odp_ports;
1088 size_t i;
1089
1090 svec_init(&devnames);
1091 PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
1092 svec_add(&devnames, (char *) ofport->opp.name);
1093 }
1094 dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
1095 for (i = 0; i < n_odp_ports; i++) {
1096 svec_add(&devnames, odp_ports[i].devname);
1097 }
1098 free(odp_ports);
1099
1100 svec_sort_unique(&devnames);
1101 for (i = 0; i < devnames.n; i++) {
1102 update_port(p, devnames.names[i]);
1103 }
1104 svec_destroy(&devnames);
1105 }
1106
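/* Rebuilds the datapath port group 'group', which must be DP_GROUP_ALL or
 * DP_GROUP_FLOOD.  DP_GROUP_ALL contains every port; DP_GROUP_FLOOD omits
 * ports whose OFPPC_NO_FLOOD bit is set.  Returns the number of ports in
 * the group. */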
1107 static size_t
1108 refresh_port_group(struct ofproto *p, unsigned int group)
1109 {
1110 uint16_t *ports;
1111 size_t n_ports;
1112 struct ofport *port;
1113 unsigned int port_no;
1114
1115 assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);
1116
1117 ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
1118 n_ports = 0;
1119 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
1120 if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
1121 ports[n_ports++] = port_no;
1122 }
1123 }
1124 dpif_port_group_set(p->dpif, group, ports, n_ports);
1125 free(ports);
1126
1127 return n_ports;
1128 }
1129
1130 static void
1131 refresh_port_groups(struct ofproto *p)
1132 {
1133 size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
1134 size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
1135 if (p->sflow) {
1136 ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
1137 }
1138 }
1139
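/* Opens the network device for 'odp_port' and returns a new ofport
 * describing it, with its OpenFlow port description filled in (in host byte
 * order).  Returns NULL, after logging a warning, if the netdev cannot be
 * opened. */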
1140 static struct ofport *
1141 make_ofport(const struct odp_port *odp_port)
1142 {
1143 struct netdev_options netdev_options;
1144 enum netdev_flags flags;
1145 struct ofport *ofport;
1146 struct netdev *netdev;
1147 bool carrier;
1148 int error;
1149
1150 memset(&netdev_options, 0, sizeof netdev_options);
1151 netdev_options.name = odp_port->devname;
1152 netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
1153 netdev_options.may_open = true;
1154
1155 error = netdev_open(&netdev_options, &netdev);
1156 if (error) {
1157 VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
1158 "cannot be opened (%s)",
1159 odp_port->devname, odp_port->port,
1160 odp_port->devname, strerror(error));
1161 return NULL;
1162 }
1163
1164 ofport = xmalloc(sizeof *ofport);
1165 ofport->netdev = netdev;
1166 ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
1167 netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
1168 memcpy(ofport->opp.name, odp_port->devname,
1169 MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
1170 ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';
1171
1172 netdev_get_flags(netdev, &flags);
1173 ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
1174
1175 netdev_get_carrier(netdev, &carrier);
1176 ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;
1177
1178 netdev_get_features(netdev,
1179 &ofport->opp.curr, &ofport->opp.advertised,
1180 &ofport->opp.supported, &ofport->opp.peer);
1181 return ofport;
1182 }
1183
1184 static bool
1185 ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
1186 {
1187 if (port_array_get(&p->ports, odp_port->port)) {
1188 VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
1189 odp_port->port);
1190 return true;
1191 } else if (shash_find(&p->port_by_name, odp_port->devname)) {
1192 VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
1193 odp_port->devname);
1194 return true;
1195 } else {
1196 return false;
1197 }
1198 }
1199
1200 static int
1201 ofport_equal(const struct ofport *a_, const struct ofport *b_)
1202 {
1203 const struct ofp_phy_port *a = &a_->opp;
1204 const struct ofp_phy_port *b = &b_->opp;
1205
1206 BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
1207 return (a->port_no == b->port_no
1208 && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
1209 && !strcmp((char *) a->name, (char *) b->name)
1210 && a->state == b->state
1211 && a->config == b->config
1212 && a->curr == b->curr
1213 && a->advertised == b->advertised
1214 && a->supported == b->supported
1215 && a->peer == b->peer);
1216 }
1217
1218 static void
1219 send_port_status(struct ofproto *p, const struct ofport *ofport,
1220 uint8_t reason)
1221 {
1222 /* XXX Should limit the number of queued port status change messages. */
1223 struct ofconn *ofconn;
1224 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
1225 struct ofp_port_status *ops;
1226 struct ofpbuf *b;
1227
1228 ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
1229 ops->reason = reason;
1230 ops->desc = ofport->opp;
1231 hton_ofp_phy_port(&ops->desc);
1232 queue_tx(b, ofconn, NULL);
1233 }
1234 if (p->ofhooks->port_changed_cb) {
1235 p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
1236 }
1237 }
1238
1239 static void
1240 ofport_install(struct ofproto *p, struct ofport *ofport)
1241 {
1242 uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
1243 const char *netdev_name = (const char *) ofport->opp.name;
1244
1245 netdev_monitor_add(p->netdev_monitor, ofport->netdev);
1246 port_array_set(&p->ports, odp_port, ofport);
1247 shash_add(&p->port_by_name, netdev_name, ofport);
1248 if (p->sflow) {
1249 ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
1250 }
1251 }
1252
1253 static void
1254 ofport_remove(struct ofproto *p, struct ofport *ofport)
1255 {
1256 uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
1257
1258 netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
1259 port_array_set(&p->ports, odp_port, NULL);
1260 shash_delete(&p->port_by_name,
1261 shash_find(&p->port_by_name, (char *) ofport->opp.name));
1262 if (p->sflow) {
1263 ofproto_sflow_del_port(p->sflow, odp_port);
1264 }
1265 }
1266
1267 static void
1268 ofport_free(struct ofport *ofport)
1269 {
1270 if (ofport) {
1271 netdev_close(ofport->netdev);
1272 free(ofport);
1273 }
1274 }
1275
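/* Refreshes 'p''s view of the port named 'devname', sending an OFPPR_ADD,
 * OFPPR_DELETE, or OFPPR_MODIFY port status message to each OpenFlow
 * connection as appropriate and refreshing the datapath port groups. */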
1276 static void
1277 update_port(struct ofproto *p, const char *devname)
1278 {
1279 struct odp_port odp_port;
1280 struct ofport *old_ofport;
1281 struct ofport *new_ofport;
1282 int error;
1283
1284 COVERAGE_INC(ofproto_update_port);
1285
1286 /* Query the datapath for port information. */
1287 error = dpif_port_query_by_name(p->dpif, devname, &odp_port);
1288
1289 /* Find the old ofport. */
1290 old_ofport = shash_find_data(&p->port_by_name, devname);
1291 if (!error) {
1292 if (!old_ofport) {
1293 /* There's no port named 'devname' but there might be a port with
1294 * the same port number. This could happen if a port is deleted
1295 * and then a new one added in its place very quickly, or if a port
1296 * is renamed. In the former case we want to send an OFPPR_DELETE
1297 * and an OFPPR_ADD, and in the latter case we want to send a
1298 * single OFPPR_MODIFY. We can distinguish the cases by comparing
1299 * the old port's ifindex against the new port, or perhaps less
1300 * reliably but more portably by comparing the old port's MAC
1301 * against the new port's MAC. However, this code isn't that smart
1302 * and always sends an OFPPR_MODIFY (XXX). */
1303 old_ofport = port_array_get(&p->ports, odp_port.port);
1304 }
1305 } else if (error != ENOENT && error != ENODEV) {
1306 VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
1307 "%s", strerror(error));
1308 return;
1309 }
1310
1311 /* Create a new ofport. */
1312 new_ofport = !error ? make_ofport(&odp_port) : NULL;
1313
1314 /* Eliminate a few pathological cases. */
1315 if (!old_ofport && !new_ofport) {
1316 return;
1317 } else if (old_ofport && new_ofport) {
1318 /* Most of the 'config' bits are OpenFlow soft state, but
1319 * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the OpenFlow
1320 * bits from old_ofport. (make_ofport() only sets OFPPC_PORT_DOWN and
1321 * leaves the other bits 0.) */
1322 new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;
1323
1324 if (ofport_equal(old_ofport, new_ofport)) {
1325 /* False alarm--no change. */
1326 ofport_free(new_ofport);
1327 return;
1328 }
1329 }
1330
1331 /* Now deal with the normal cases. */
1332 if (old_ofport) {
1333 ofport_remove(p, old_ofport);
1334 }
1335 if (new_ofport) {
1336 ofport_install(p, new_ofport);
1337 }
1338 send_port_status(p, new_ofport ? new_ofport : old_ofport,
1339 (!old_ofport ? OFPPR_ADD
1340 : !new_ofport ? OFPPR_DELETE
1341 : OFPPR_MODIFY));
1342 ofport_free(old_ofport);
1343
1344 /* Update port groups. */
1345 refresh_port_groups(p);
1346 }
1347
1348 static int
1349 init_ports(struct ofproto *p)
1350 {
1351 struct odp_port *ports;
1352 size_t n_ports;
1353 size_t i;
1354 int error;
1355
1356 error = dpif_port_list(p->dpif, &ports, &n_ports);
1357 if (error) {
1358 return error;
1359 }
1360
1361 for (i = 0; i < n_ports; i++) {
1362 const struct odp_port *odp_port = &ports[i];
1363 if (!ofport_conflicts(p, odp_port)) {
1364 struct ofport *ofport = make_ofport(odp_port);
1365 if (ofport) {
1366 ofport_install(p, ofport);
1367 }
1368 }
1369 }
1370 free(ports);
1371 refresh_port_groups(p);
1372 return 0;
1373 }
1374 \f
1375 static struct ofconn *
1376 ofconn_create(struct ofproto *p, struct rconn *rconn)
1377 {
1378 struct ofconn *ofconn = xmalloc(sizeof *ofconn);
1379 list_push_back(&p->all_conns, &ofconn->node);
1380 ofconn->rconn = rconn;
1381 ofconn->pktbuf = NULL;
1382 ofconn->miss_send_len = 0;
1383 ofconn->packet_in_counter = rconn_packet_counter_create();
1384 ofconn->reply_counter = rconn_packet_counter_create();
1385 return ofconn;
1386 }
1387
1388 static void
1389 ofconn_destroy(struct ofconn *ofconn)
1390 {
1391 list_remove(&ofconn->node);
1392 rconn_destroy(ofconn->rconn);
1393 rconn_packet_counter_destroy(ofconn->packet_in_counter);
1394 rconn_packet_counter_destroy(ofconn->reply_counter);
1395 pktbuf_destroy(ofconn->pktbuf);
1396 free(ofconn);
1397 }
1398
1399 static void
1400 ofconn_run(struct ofconn *ofconn, struct ofproto *p)
1401 {
1402 int iteration;
1403
1404 rconn_run(ofconn->rconn);
1405
1406 if (rconn_packet_counter_read(ofconn->reply_counter) < OFCONN_REPLY_MAX) {
1407 /* Limit the number of iterations to prevent other tasks from
1408 * starving. */
1409 for (iteration = 0; iteration < 50; iteration++) {
1410 struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
1411 if (!of_msg) {
1412 break;
1413 }
1414 if (p->fail_open) {
1415 fail_open_maybe_recover(p->fail_open);
1416 }
1417 handle_openflow(ofconn, p, of_msg);
1418 ofpbuf_delete(of_msg);
1419 }
1420 }
1421
1422 if (ofconn != p->controller && !rconn_is_alive(ofconn->rconn)) {
1423 ofconn_destroy(ofconn);
1424 }
1425 }
1426
1427 static void
1428 ofconn_wait(struct ofconn *ofconn)
1429 {
1430 rconn_run_wait(ofconn->rconn);
1431 if (rconn_packet_counter_read(ofconn->reply_counter) < OFCONN_REPLY_MAX) {
1432 rconn_recv_wait(ofconn->rconn);
1433 } else {
1434 COVERAGE_INC(ofproto_ofconn_stuck);
1435 }
1436 }
1437 \f
1438 /* Caller is responsible for initializing the 'cr' member of the returned
1439 * rule. */
1440 static struct rule *
1441 rule_create(struct ofproto *ofproto, struct rule *super,
1442 const union ofp_action *actions, size_t n_actions,
1443 uint16_t idle_timeout, uint16_t hard_timeout,
1444 uint64_t flow_cookie, bool send_flow_removed)
1445 {
1446 struct rule *rule = xzalloc(sizeof *rule);
1447 rule->idle_timeout = idle_timeout;
1448 rule->hard_timeout = hard_timeout;
1449 rule->flow_cookie = flow_cookie;
1450 rule->used = rule->created = time_msec();
1451 rule->send_flow_removed = send_flow_removed;
1452 rule->super = super;
1453 if (super) {
1454 list_push_back(&super->list, &rule->list);
1455 } else {
1456 list_init(&rule->list);
1457 }
1458 rule->n_actions = n_actions;
1459 rule->actions = xmemdup(actions, n_actions * sizeof *actions);
1460 netflow_flow_clear(&rule->nf_flow);
1461 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
1462
1463 return rule;
1464 }
1465
1466 static struct rule *
1467 rule_from_cls_rule(const struct cls_rule *cls_rule)
1468 {
1469 return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
1470 }
1471
1472 static void
1473 rule_free(struct rule *rule)
1474 {
1475 free(rule->actions);
1476 free(rule->odp_actions);
1477 free(rule);
1478 }
1479
1480 /* Destroys 'rule'. If 'rule' is a subrule, also removes it from its
1481 * super-rule's list of subrules. If 'rule' is a super-rule, also iterates
1482 * through all of its subrules and revalidates them, destroying any that no
1483 * longer have a super-rule (which is probably all of them).
1484 *
1485 * Before calling this function, the caller must have removed 'rule' from
1486 * the classifier. If 'rule' is an exact-match rule, the caller is also
1487 * responsible for ensuring that it has been uninstalled from the datapath. */
1488 static void
1489 rule_destroy(struct ofproto *ofproto, struct rule *rule)
1490 {
1491 if (!rule->super) {
1492 struct rule *subrule, *next;
1493 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
1494 revalidate_rule(ofproto, subrule);
1495 }
1496 } else {
1497 list_remove(&rule->list);
1498 }
1499 rule_free(rule);
1500 }
1501
1502 static bool
1503 rule_has_out_port(const struct rule *rule, uint16_t out_port)
1504 {
1505 const union ofp_action *oa;
1506 struct actions_iterator i;
1507
1508 if (out_port == htons(OFPP_NONE)) {
1509 return true;
1510 }
1511 for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
1512 oa = actions_next(&i)) {
1513 if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
1514 return true;
1515 }
1516 }
1517 return false;
1518 }
1519
1520 /* Executes the actions indicated by 'rule' on 'packet', which is in flow
1521 * 'flow' and is considered to have arrived on ODP port 'in_port'.
1522 *
1523 * The flow that 'packet' actually contains does not need to actually match
1524 * 'rule'; the actions in 'rule' will be applied to it either way. Likewise,
1525 * the packet and byte counters for 'rule' will be credited for the packet sent
1526 * out whether or not the packet actually matches 'rule'.
1527 *
1528 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
1529 * the caller must already have accurately composed ODP actions for it given
1530 * 'packet' using rule_make_actions(). If 'rule' is a wildcard rule, or if
1531 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
1532 * function will compose a set of ODP actions based on 'rule''s OpenFlow
1533 * actions and apply them to 'packet'. */
1534 static void
1535 rule_execute(struct ofproto *ofproto, struct rule *rule,
1536 struct ofpbuf *packet, const flow_t *flow)
1537 {
1538 const union odp_action *actions;
1539 size_t n_actions;
1540 struct odp_actions a;
1541
1542 /* Grab or compose the ODP actions.
1543 *
1544 * The special case for an exact-match 'rule' where 'flow' is not the
1545 * rule's flow is important: it avoids, e.g., sending a packet out its
1546 * input port simply because the ODP actions were composed for the wrong
1547 * scenario. */
1548 if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
1549 struct rule *super = rule->super ? rule->super : rule;
1550 if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
1551 packet, &a, NULL, NULL, NULL)) {
1552 return;
1553 }
1554 actions = a.actions;
1555 n_actions = a.n_actions;
1556 } else {
1557 actions = rule->odp_actions;
1558 n_actions = rule->n_odp_actions;
1559 }
1560
1561 /* Execute the ODP actions. */
1562 if (!dpif_execute(ofproto->dpif, flow->in_port,
1563 actions, n_actions, packet)) {
1564 struct odp_flow_stats stats;
1565 flow_extract_stats(flow, packet, &stats);
1566 update_stats(ofproto, rule, &stats);
1567 rule->used = time_msec();
1568 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
1569 }
1570 }
1571
1572 static void
1573 rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
1574 uint16_t in_port)
1575 {
1576 struct rule *displaced_rule;
1577
1578 /* Insert the rule in the classifier. */
1579 displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
1580 if (!rule->cr.wc.wildcards) {
1581 rule_make_actions(p, rule, packet);
1582 }
1583
1584 /* Send the packet and credit it to the rule. */
1585 if (packet) {
1586 flow_t flow;
1587 flow_extract(packet, 0, in_port, &flow);
1588 rule_execute(p, rule, packet, &flow);
1589 }
1590
1591 /* Install the rule in the datapath only after sending the packet, to
1592 * avoid packet reordering. */
1593 if (rule->cr.wc.wildcards) {
1594 COVERAGE_INC(ofproto_add_wc_flow);
1595 p->need_revalidate = true;
1596 } else {
1597 rule_install(p, rule, displaced_rule);
1598 }
1599
1600 /* Free the rule that was displaced, if any. */
1601 if (displaced_rule) {
1602 rule_destroy(p, displaced_rule);
1603 }
1604 }
1605
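/* Creates an exact-match subrule of the wildcarded rule 'rule' for 'flow'
 * and inserts it into the classifier.  The subrule inherits 'rule''s
 * timeouts and takes priority UINT16_MAX, or the super-rule's own priority
 * if that is higher (rules above UINT16_MAX are hidden ones set up by
 * ofproto itself). */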
1606 static struct rule *
1607 rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
1608 const flow_t *flow)
1609 {
1610 struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
1611 rule->idle_timeout, rule->hard_timeout,
1612 0, false);
1613 COVERAGE_INC(ofproto_subrule_create);
1614 cls_rule_from_flow(flow, 0, (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
1615 : rule->cr.priority), &subrule->cr);
1616 classifier_insert_exact(&ofproto->cls, &subrule->cr);
1617
1618 return subrule;
1619 }
1620
1621 static void
1622 rule_remove(struct ofproto *ofproto, struct rule *rule)
1623 {
1624 if (rule->cr.wc.wildcards) {
1625 COVERAGE_INC(ofproto_del_wc_flow);
1626 ofproto->need_revalidate = true;
1627 } else {
1628 rule_uninstall(ofproto, rule);
1629 }
1630 classifier_remove(&ofproto->cls, &rule->cr);
1631 rule_destroy(ofproto, rule);
1632 }
1633
1634 /* Returns true if the actions changed, false otherwise. */
1635 static bool
1636 rule_make_actions(struct ofproto *p, struct rule *rule,
1637 const struct ofpbuf *packet)
1638 {
1639 const struct rule *super;
1640 struct odp_actions a;
1641 size_t actions_len;
1642
1643 assert(!rule->cr.wc.wildcards);
1644
1645 super = rule->super ? rule->super : rule;
1646 rule->tags = 0;
1647 xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
1648 packet, &a, &rule->tags, &rule->may_install,
1649 &rule->nf_flow.output_iface);
1650
1651 actions_len = a.n_actions * sizeof *a.actions;
1652 if (rule->n_odp_actions != a.n_actions
1653 || memcmp(rule->odp_actions, a.actions, actions_len)) {
1654 COVERAGE_INC(ofproto_odp_unchanged);
1655 free(rule->odp_actions);
1656 rule->n_odp_actions = a.n_actions;
1657 rule->odp_actions = xmemdup(a.actions, actions_len);
1658 return true;
1659 } else {
1660 return false;
1661 }
1662 }
1663
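/* Fills in 'put' with a request to set up 'rule''s exact-match flow and
 * datapath actions, using the ODPPF_* bits in 'flags', and submits it to
 * the datapath.  Returns the dpif_flow_put() result. */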
1664 static int
1665 do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
1666 struct odp_flow_put *put)
1667 {
1668 memset(&put->flow.stats, 0, sizeof put->flow.stats);
1669 put->flow.key = rule->cr.flow;
1670 put->flow.actions = rule->odp_actions;
1671 put->flow.n_actions = rule->n_odp_actions;
1672 put->flow.flags = 0;
1673 put->flags = flags;
1674 return dpif_flow_put(ofproto->dpif, put);
1675 }
1676
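/* Installs 'rule''s exact-match flow in the datapath if 'rule->may_install'
 * is true, crediting statistics from any flow that it displaces to
 * 'displaced_rule' (if nonnull).  If the rule may not be installed,
 * uninstalls 'displaced_rule' instead. */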
1677 static void
1678 rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
1679 {
1680 assert(!rule->cr.wc.wildcards);
1681
1682 if (rule->may_install) {
1683 struct odp_flow_put put;
1684 if (!do_put_flow(p, rule,
1685 ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
1686 &put)) {
1687 rule->installed = true;
1688 if (displaced_rule) {
1689 update_stats(p, displaced_rule, &put.flow.stats);
1690 rule_post_uninstall(p, displaced_rule);
1691 }
1692 }
1693 } else if (displaced_rule) {
1694 rule_uninstall(p, displaced_rule);
1695 }
1696 }
1697
1698 static void
1699 rule_reinstall(struct ofproto *ofproto, struct rule *rule)
1700 {
1701 if (rule->installed) {
1702 struct odp_flow_put put;
1703 COVERAGE_INC(ofproto_dp_missed);
1704 do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
1705 } else {
1706 rule_install(ofproto, rule, NULL);
1707 }
1708 }
1709
1710 static void
1711 rule_update_actions(struct ofproto *ofproto, struct rule *rule)
1712 {
1713 bool actions_changed;
1714 uint16_t new_out_iface, old_out_iface;
1715
1716 old_out_iface = rule->nf_flow.output_iface;
1717 actions_changed = rule_make_actions(ofproto, rule, NULL);
1718
1719 if (rule->may_install) {
1720 if (rule->installed) {
1721 if (actions_changed) {
1722 struct odp_flow_put put;
1723 do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
1724 | ODPPF_ZERO_STATS, &put);
1725 update_stats(ofproto, rule, &put.flow.stats);
1726
1727 /* Temporarily set the old output iface so that NetFlow
1728 * messages have the correct output interface for the old
1729 * stats. */
1730 new_out_iface = rule->nf_flow.output_iface;
1731 rule->nf_flow.output_iface = old_out_iface;
1732 rule_post_uninstall(ofproto, rule);
1733 rule->nf_flow.output_iface = new_out_iface;
1734 }
1735 } else {
1736 rule_install(ofproto, rule, NULL);
1737 }
1738 } else {
1739 rule_uninstall(ofproto, rule);
1740 }
1741 }
1742
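/* Reports to the ofhooks' account_flow_cb any of 'rule''s bytes that have
 * not yet been accounted for.  'extra_bytes' counts additional bytes, beyond
 * 'rule->byte_count', to include in the total. */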
1743 static void
1744 rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
1745 {
1746 uint64_t total_bytes = rule->byte_count + extra_bytes;
1747
1748 if (ofproto->ofhooks->account_flow_cb
1749 && total_bytes > rule->accounted_bytes)
1750 {
1751 ofproto->ofhooks->account_flow_cb(
1752 &rule->cr.flow, rule->odp_actions, rule->n_odp_actions,
1753 total_bytes - rule->accounted_bytes, ofproto->aux);
1754 rule->accounted_bytes = total_bytes;
1755 }
1756 }
1757
1758 static void
1759 rule_uninstall(struct ofproto *p, struct rule *rule)
1760 {
1761 assert(!rule->cr.wc.wildcards);
1762 if (rule->installed) {
1763 struct odp_flow odp_flow;
1764
1765 odp_flow.key = rule->cr.flow;
1766 odp_flow.actions = NULL;
1767 odp_flow.n_actions = 0;
1768 odp_flow.flags = 0;
1769 if (!dpif_flow_del(p->dpif, &odp_flow)) {
1770 update_stats(p, rule, &odp_flow.stats);
1771 }
1772 rule->installed = false;
1773
1774 rule_post_uninstall(p, rule);
1775 }
1776 }
1777
1778 static bool
1779 is_controller_rule(struct rule *rule)
1780 {
1781 /* If the only action is to send to the controller, then don't report
1782 * NetFlow expiration messages, since such flows are just part of the
1783 * control logic for the network and not real traffic. */
1784
1785 if (rule && rule->super) {
1786 struct rule *super = rule->super;
1787
1788 return super->n_actions == 1 &&
1789 super->actions[0].type == htons(OFPAT_OUTPUT) &&
1790 super->actions[0].output.port == htons(OFPP_CONTROLLER);
1791 }
1792
1793 return false;
1794 }
1795
1796 static void
1797 rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
1798 {
1799 struct rule *super = rule->super;
1800
1801 rule_account(ofproto, rule, 0);
1802
1803 if (ofproto->netflow && !is_controller_rule(rule)) {
1804 struct ofexpired expired;
1805 expired.flow = rule->cr.flow;
1806 expired.packet_count = rule->packet_count;
1807 expired.byte_count = rule->byte_count;
1808 expired.used = rule->used;
1809 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
1810 }
1811 if (super) {
1812 super->packet_count += rule->packet_count;
1813 super->byte_count += rule->byte_count;
1814
1815 /* Reset counters to prevent double counting if the rule ever gets
1816 * reinstalled. */
1817 rule->packet_count = 0;
1818 rule->byte_count = 0;
1819 rule->accounted_bytes = 0;
1820
1821 netflow_flow_clear(&rule->nf_flow);
1822 }
1823 }
1824 \f
1825 static void
1826 queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
1827 struct rconn_packet_counter *counter)
1828 {
1829 update_openflow_length(msg);
1830 if (rconn_send(ofconn->rconn, msg, counter)) {
1831 ofpbuf_delete(msg);
1832 }
1833 }
1834
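/* Sends an OFPT_ERROR message to 'ofconn' in reply to 'oh' (or with xid 0
 * if 'oh' is null).  'error' packs the OpenFlow error type into its high 16
 * bits and the error code into its low 16 bits; the 'len' bytes of 'data'
 * (typically the start of the offending message) form the error body. */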
1835 static void
1836 send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
1837 int error, const void *data, size_t len)
1838 {
1839 struct ofpbuf *buf;
1840 struct ofp_error_msg *oem;
1841
1842 if (!(error >> 16)) {
1843 VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
1844 error);
1845 return;
1846 }
1847
1848 COVERAGE_INC(ofproto_error);
1849 oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
1850 oh ? oh->xid : 0, &buf);
1851 oem->type = htons((unsigned int) error >> 16);
1852 oem->code = htons(error & 0xffff);
1853 memcpy(oem->data, data, len);
1854 queue_tx(buf, ofconn, ofconn->reply_counter);
1855 }
1856
1857 static void
1858 send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
1859 int error)
1860 {
1861 size_t oh_length = ntohs(oh->length);
1862 send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
1863 }
1864
1865 static void
1866 hton_ofp_phy_port(struct ofp_phy_port *opp)
1867 {
1868 opp->port_no = htons(opp->port_no);
1869 opp->config = htonl(opp->config);
1870 opp->state = htonl(opp->state);
1871 opp->curr = htonl(opp->curr);
1872 opp->advertised = htonl(opp->advertised);
1873 opp->supported = htonl(opp->supported);
1874 opp->peer = htonl(opp->peer);
1875 }
1876
1877 static int
1878 handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
1879 {
1880 struct ofp_header *rq = oh;
1881 queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
1882 return 0;
1883 }
1884
1885 static int
1886 handle_features_request(struct ofproto *p, struct ofconn *ofconn,
1887 struct ofp_header *oh)
1888 {
1889 struct ofp_switch_features *osf;
1890 struct ofpbuf *buf;
1891 unsigned int port_no;
1892 struct ofport *port;
1893
1894 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
1895 osf->datapath_id = htonll(p->datapath_id);
1896 osf->n_buffers = htonl(pktbuf_capacity());
1897 osf->n_tables = 2;
1898 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
1899 OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
1900 osf->actions = htonl((1u << OFPAT_OUTPUT) |
1901 (1u << OFPAT_SET_VLAN_VID) |
1902 (1u << OFPAT_SET_VLAN_PCP) |
1903 (1u << OFPAT_STRIP_VLAN) |
1904 (1u << OFPAT_SET_DL_SRC) |
1905 (1u << OFPAT_SET_DL_DST) |
1906 (1u << OFPAT_SET_NW_SRC) |
1907 (1u << OFPAT_SET_NW_DST) |
1908 (1u << OFPAT_SET_NW_TOS) |
1909 (1u << OFPAT_SET_TP_SRC) |
1910 (1u << OFPAT_SET_TP_DST));
1911
1912 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
1913 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
1914 }
1915
1916 queue_tx(buf, ofconn, ofconn->reply_counter);
1917 return 0;
1918 }
1919
1920 static int
1921 handle_get_config_request(struct ofproto *p, struct ofconn *ofconn,
1922 struct ofp_header *oh)
1923 {
1924 struct ofpbuf *buf;
1925 struct ofp_switch_config *osc;
1926 uint16_t flags;
1927 bool drop_frags;
1928
1929 /* Figure out flags. */
1930 dpif_get_drop_frags(p->dpif, &drop_frags);
1931 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
1932
1933 /* Send reply. */
1934 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
1935 osc->flags = htons(flags);
1936 osc->miss_send_len = htons(ofconn->miss_send_len);
1937 queue_tx(buf, ofconn, ofconn->reply_counter);
1938
1939 return 0;
1940 }
1941
1942 static int
1943 handle_set_config(struct ofproto *p, struct ofconn *ofconn,
1944 struct ofp_switch_config *osc)
1945 {
1946 uint16_t flags;
1947 int error;
1948
1949 error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
1950 if (error) {
1951 return error;
1952 }
1953 flags = ntohs(osc->flags);
1954
1955 if (ofconn == p->controller) {
1956 switch (flags & OFPC_FRAG_MASK) {
1957 case OFPC_FRAG_NORMAL:
1958 dpif_set_drop_frags(p->dpif, false);
1959 break;
1960 case OFPC_FRAG_DROP:
1961 dpif_set_drop_frags(p->dpif, true);
1962 break;
1963 default:
1964 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
1965 flags);
1966 break;
1967 }
1968 }
1969
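/* A nonzero miss_send_len requires a packet buffer for this connection, so
* create or destroy ofconn->pktbuf whenever miss_send_len transitions
* between zero and nonzero. */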
1970 if ((ntohs(osc->miss_send_len) != 0) != (ofconn->miss_send_len != 0)) {
1971 if (ntohs(osc->miss_send_len) != 0) {
1972 ofconn->pktbuf = pktbuf_create();
1973 } else {
1974 pktbuf_destroy(ofconn->pktbuf);
1975 }
1976 }
1977
1978 ofconn->miss_send_len = ntohs(osc->miss_send_len);
1979
1980 return 0;
1981 }
1982
1983 static void
1984 add_output_group_action(struct odp_actions *actions, uint16_t group,
1985 uint16_t *nf_output_iface)
1986 {
1987 odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
1988
1989 if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
1990 *nf_output_iface = NF_OUT_FLOOD;
1991 }
1992 }
1993
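/* Composes an ODPAT_CONTROLLER action from an OpenFlow output-to-controller
* action.  A zero 'max_len' is mapped to UINT32_MAX, which the datapath is
* assumed here to treat as "no length limit" on the copied packet. */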
1994 static void
1995 add_controller_action(struct odp_actions *actions,
1996 const struct ofp_action_output *oao)
1997 {
1998 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
1999 a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
2000 }
2001
2002 struct action_xlate_ctx {
2003 /* Input. */
2004 flow_t flow; /* Flow to which these actions correspond. */
2005 int recurse; /* Recursion level, via xlate_table_action. */
2006 struct ofproto *ofproto;
2007 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
2008 * null pointer if we are revalidating
2009 * without a packet to refer to. */
2010
2011 /* Output. */
2012 struct odp_actions *out; /* Datapath actions. */
2013 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
2014 bool may_set_up_flow; /* True ordinarily; false if the actions must
2015 * be reassessed for every packet. */
2016 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
2017 };
2018
2019 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2020 struct action_xlate_ctx *ctx);
2021
2022 static void
2023 add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2024 {
2025 const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
2026
2027 if (ofport) {
2028 if (ofport->opp.config & OFPPC_NO_FWD) {
2029 /* Forwarding disabled on port. */
2030 return;
2031 }
2032 } else {
2033 /*
2034 * We don't have an ofport record for this port, but it doesn't hurt to
2035 * allow forwarding to it anyhow. Perhaps the port will appear later,
2036 * in which case we are effectively pre-populating the flow table.
2037 */
2038 }
2039
2040 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
2041 ctx->nf_output_iface = port;
2042 }
2043
2044 static struct rule *
2045 lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
2046 {
2047 struct rule *rule;
2048 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
2049
2050 /* The rule we found might not be valid, since we could be in need of
2051 * revalidation. If it is not valid, don't return it. */
2052 if (rule
2053 && rule->super
2054 && ofproto->need_revalidate
2055 && !revalidate_rule(ofproto, rule)) {
2056 COVERAGE_INC(ofproto_invalidated);
2057 return NULL;
2058 }
2059
2060 return rule;
2061 }
2062
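/* Looks up the flow in the flow table with 'in_port' substituted as the
* input port and translates the matching rule's actions into 'ctx'.
* Recursion is capped at one level, so a resubmitted flow's own resubmit or
* OFPP_TABLE actions are silently ignored. */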
2063 static void
2064 xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2065 {
2066 if (!ctx->recurse) {
2067 uint16_t old_in_port;
2068 struct rule *rule;
2069
2070 /* Look up a flow with 'in_port' as the input port. Then restore the
2071 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2072 * have surprising behavior). */
2073 old_in_port = ctx->flow.in_port;
2074 ctx->flow.in_port = in_port;
2075 rule = lookup_valid_rule(ctx->ofproto, &ctx->flow);
2076 ctx->flow.in_port = old_in_port;
2077
2078 if (rule) {
2079 if (rule->super) {
2080 rule = rule->super;
2081 }
2082
2083 ctx->recurse++;
2084 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2085 ctx->recurse--;
2086 }
2087 }
2088 }
2089
2090 static void
2091 xlate_output_action(struct action_xlate_ctx *ctx,
2092 const struct ofp_action_output *oao)
2093 {
2094 uint16_t odp_port;
2095 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2096
2097 ctx->nf_output_iface = NF_OUT_DROP;
2098
2099 switch (ntohs(oao->port)) {
2100 case OFPP_IN_PORT:
2101 add_output_action(ctx, ctx->flow.in_port);
2102 break;
2103 case OFPP_TABLE:
2104 xlate_table_action(ctx, ctx->flow.in_port);
2105 break;
2106 case OFPP_NORMAL:
2107 if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet,
2108 ctx->out, ctx->tags,
2109 &ctx->nf_output_iface,
2110 ctx->ofproto->aux)) {
2111 COVERAGE_INC(ofproto_uninstallable);
2112 ctx->may_set_up_flow = false;
2113 }
2114 break;
2115 case OFPP_FLOOD:
2116 add_output_group_action(ctx->out, DP_GROUP_FLOOD,
2117 &ctx->nf_output_iface);
2118 break;
2119 case OFPP_ALL:
2120 add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
2121 break;
2122 case OFPP_CONTROLLER:
2123 add_controller_action(ctx->out, oao);
2124 break;
2125 case OFPP_LOCAL:
2126 add_output_action(ctx, ODPP_LOCAL);
2127 break;
2128 default:
2129 odp_port = ofp_port_to_odp_port(ntohs(oao->port));
2130 if (odp_port != ctx->flow.in_port) {
2131 add_output_action(ctx, odp_port);
2132 }
2133 break;
2134 }
2135
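/* Fold this action's NetFlow output port into the running value: a previous
* flood stays a flood, a drop here keeps the previous value, and two
* distinct real output ports (e.g. successive outputs to ports 1 and 2)
* collapse to NF_OUT_MULTI. */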
2136 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2137 ctx->nf_output_iface = NF_OUT_FLOOD;
2138 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2139 ctx->nf_output_iface = prev_nf_output_iface;
2140 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2141 ctx->nf_output_iface != NF_OUT_FLOOD) {
2142 ctx->nf_output_iface = NF_OUT_MULTI;
2143 }
2144 }
2145
2146 static void
2147 xlate_nicira_action(struct action_xlate_ctx *ctx,
2148 const struct nx_action_header *nah)
2149 {
2150 const struct nx_action_resubmit *nar;
2151 const struct nx_action_set_tunnel *nast;
2152 union odp_action *oa;
2153 int subtype = ntohs(nah->subtype);
2154
2155 assert(nah->vendor == htonl(NX_VENDOR_ID));
2156 switch (subtype) {
2157 case NXAST_RESUBMIT:
2158 nar = (const struct nx_action_resubmit *) nah;
2159 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2160 break;
2161
2162 case NXAST_SET_TUNNEL:
2163 nast = (const struct nx_action_set_tunnel *) nah;
2164 oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL);
2165 ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
2166 break;
2167
2168 /* If you add a new action here that modifies flow data, don't forget to
2169 * update the flow key in ctx->flow in the same way. */
2170
2171 default:
2172 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2173 break;
2174 }
2175 }
2176
2177 static void
2178 do_xlate_actions(const union ofp_action *in, size_t n_in,
2179 struct action_xlate_ctx *ctx)
2180 {
2181 struct actions_iterator iter;
2182 const union ofp_action *ia;
2183 const struct ofport *port;
2184
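/* Discard flows received on a port configured with OFPPC_NO_RECV or, for
* frames addressed to the STP multicast address, OFPPC_NO_RECV_STP. */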
2185 port = port_array_get(&ctx->ofproto->ports, ctx->flow.in_port);
2186 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2187 port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, stp_eth_addr)
2188 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2189 /* Drop this flow. */
2190 return;
2191 }
2192
2193 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2194 uint16_t type = ntohs(ia->type);
2195 union odp_action *oa;
2196
2197 switch (type) {
2198 case OFPAT_OUTPUT:
2199 xlate_output_action(ctx, &ia->output);
2200 break;
2201
2202 case OFPAT_SET_VLAN_VID:
2203 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID);
2204 ctx->flow.dl_vlan = oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid;
2205 break;
2206
2207 case OFPAT_SET_VLAN_PCP:
2208 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP);
2209 ctx->flow.dl_vlan_pcp = oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp;
2210 break;
2211
2212 case OFPAT_STRIP_VLAN:
2213 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2214 ctx->flow.dl_vlan = OFP_VLAN_NONE;
2215 ctx->flow.dl_vlan_pcp = 0;
2216 break;
2217
2218 case OFPAT_SET_DL_SRC:
2219 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2220 memcpy(oa->dl_addr.dl_addr,
2221 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2222 memcpy(ctx->flow.dl_src,
2223 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2224 break;
2225
2226 case OFPAT_SET_DL_DST:
2227 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2228 memcpy(oa->dl_addr.dl_addr,
2229 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2230 memcpy(ctx->flow.dl_dst,
2231 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2232 break;
2233
2234 case OFPAT_SET_NW_SRC:
2235 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2236 ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2237 break;
2238
2239 case OFPAT_SET_NW_DST:
2240 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
2241 ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2242 break;
2243
2244 case OFPAT_SET_NW_TOS:
2245 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
2246 ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2247 break;
2248
2249 case OFPAT_SET_TP_SRC:
2250 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2251 ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
2252 break;
2253
2254 case OFPAT_SET_TP_DST:
2255 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2256 ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
2257 break;
2258
2259 case OFPAT_VENDOR:
2260 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2261 break;
2262
2263 default:
2264 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2265 break;
2266 }
2267 }
2268 }
2269
2270 static int
2271 xlate_actions(const union ofp_action *in, size_t n_in,
2272 const flow_t *flow, struct ofproto *ofproto,
2273 const struct ofpbuf *packet,
2274 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2275 uint16_t *nf_output_iface)
2276 {
2277 tag_type no_tags = 0;
2278 struct action_xlate_ctx ctx;
2279 COVERAGE_INC(ofproto_ofp2odp);
2280 odp_actions_init(out);
2281 ctx.flow = *flow;
2282 ctx.recurse = 0;
2283 ctx.ofproto = ofproto;
2284 ctx.packet = packet;
2285 ctx.out = out;
2286 ctx.tags = tags ? tags : &no_tags;
2287 ctx.may_set_up_flow = true;
2288 ctx.nf_output_iface = NF_OUT_DROP;
2289 do_xlate_actions(in, n_in, &ctx);
2290
2291 /* Check with in-band control to see if we're allowed to set up this
2292 * flow. */
2293 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
2294 ctx.may_set_up_flow = false;
2295 }
2296
2297 if (may_set_up_flow) {
2298 *may_set_up_flow = ctx.may_set_up_flow;
2299 }
2300 if (nf_output_iface) {
2301 *nf_output_iface = ctx.nf_output_iface;
2302 }
2303 if (odp_actions_overflow(out)) {
2304 odp_actions_init(out);
2305 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2306 }
2307 return 0;
2308 }
2309
2310 static int
2311 handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
2312 struct ofp_header *oh)
2313 {
2314 struct ofp_packet_out *opo;
2315 struct ofpbuf payload, *buffer;
2316 struct odp_actions actions;
2317 int n_actions;
2318 uint16_t in_port;
2319 flow_t flow;
2320 int error;
2321
2322 error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports);
2323 if (error) {
2324 return error;
2325 }
2326 opo = (struct ofp_packet_out *) oh;
2327
2328 COVERAGE_INC(ofproto_packet_out);
2329 if (opo->buffer_id != htonl(UINT32_MAX)) {
2330 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2331 &buffer, &in_port);
2332 if (error || !buffer) {
2333 return error;
2334 }
2335 payload = *buffer;
2336 } else {
2337 buffer = NULL;
2338 }
2339
2340 flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
2341 error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
2342 &flow, p, &payload, &actions, NULL, NULL, NULL);
2343 if (error) {
2344 return error;
2345 }
2346
2347 dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions,
2348 &payload);
2349 ofpbuf_delete(buffer);
2350
2351 return 0;
2352 }
2353
2354 static void
2355 update_port_config(struct ofproto *p, struct ofport *port,
2356 uint32_t config, uint32_t mask)
2357 {
2358 mask &= config ^ port->opp.config;
2359 if (mask & OFPPC_PORT_DOWN) {
2360 if (config & OFPPC_PORT_DOWN) {
2361 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
2362 } else {
2363 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
2364 }
2365 }
2366 #define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
2367 if (mask & REVALIDATE_BITS) {
2368 COVERAGE_INC(ofproto_costly_flags);
2369 port->opp.config ^= mask & REVALIDATE_BITS;
2370 p->need_revalidate = true;
2371 }
2372 #undef REVALIDATE_BITS
2373 if (mask & OFPPC_NO_FLOOD) {
2374 port->opp.config ^= OFPPC_NO_FLOOD;
2375 refresh_port_groups(p);
2376 }
2377 if (mask & OFPPC_NO_PACKET_IN) {
2378 port->opp.config ^= OFPPC_NO_PACKET_IN;
2379 }
2380 }
2381
2382 static int
2383 handle_port_mod(struct ofproto *p, struct ofp_header *oh)
2384 {
2385 const struct ofp_port_mod *opm;
2386 struct ofport *port;
2387 int error;
2388
2389 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
2390 if (error) {
2391 return error;
2392 }
2393 opm = (struct ofp_port_mod *) oh;
2394
2395 port = port_array_get(&p->ports,
2396 ofp_port_to_odp_port(ntohs(opm->port_no)));
2397 if (!port) {
2398 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
2399 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
2400 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
2401 } else {
2402 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
2403 if (opm->advertise) {
2404 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
2405 }
2406 }
2407 return 0;
2408 }
2409
2410 static struct ofpbuf *
2411 make_stats_reply(uint32_t xid, uint16_t type, size_t body_len)
2412 {
2413 struct ofp_stats_reply *osr;
2414 struct ofpbuf *msg;
2415
2416 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
2417 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
2418 osr->type = type;
2419 osr->flags = htons(0);
2420 return msg;
2421 }
2422
2423 static struct ofpbuf *
2424 start_stats_reply(const struct ofp_stats_request *request, size_t body_len)
2425 {
2426 return make_stats_reply(request->header.xid, request->type, body_len);
2427 }
2428
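/* Appends 'nbytes' of space for a new stats reply part to '*msgp'.  If the
* addition would overflow the 16-bit OpenFlow message length, the current
* reply is flagged OFPSF_REPLY_MORE and transmitted, and a fresh reply
* takes its place in '*msgp'.  Returns the appended, uninitialized space. */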
2429 static void *
2430 append_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofpbuf **msgp)
2431 {
2432 struct ofpbuf *msg = *msgp;
2433 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
2434 if (nbytes + msg->size > UINT16_MAX) {
2435 struct ofp_stats_reply *reply = msg->data;
2436 reply->flags = htons(OFPSF_REPLY_MORE);
2437 *msgp = make_stats_reply(reply->header.xid, reply->type, nbytes);
2438 queue_tx(msg, ofconn, ofconn->reply_counter);
2439 }
2440 return ofpbuf_put_uninit(*msgp, nbytes);
2441 }
2442
2443 static int
2444 handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn,
2445 struct ofp_stats_request *request)
2446 {
2447 struct ofp_desc_stats *ods;
2448 struct ofpbuf *msg;
2449
2450 msg = start_stats_reply(request, sizeof *ods);
2451 ods = append_stats_reply(sizeof *ods, ofconn, &msg);
2452 memset(ods, 0, sizeof *ods);
2453 ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc);
2454 ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc);
2455 ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
2456 ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
2457 ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
2458 queue_tx(msg, ofconn, ofconn->reply_counter);
2459
2460 return 0;
2461 }
2462
2463 static void
2464 count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
2465 {
2466 struct rule *rule = rule_from_cls_rule(cls_rule);
2467 int *n_subrules = n_subrules_;
2468
2469 if (rule->super) {
2470 (*n_subrules)++;
2471 }
2472 }
2473
2474 static int
2475 handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
2476 struct ofp_stats_request *request)
2477 {
2478 struct ofp_table_stats *ots;
2479 struct ofpbuf *msg;
2480 struct odp_stats dpstats;
2481 int n_exact, n_subrules, n_wild;
2482
2483 msg = start_stats_reply(request, sizeof *ots * 2);
2484
2485 /* Count rules of various kinds. */
2486 n_subrules = 0;
2487 classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
2488 n_exact = classifier_count_exact(&p->cls) - n_subrules;
2489 n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
2490
2491 /* Hash table. */
2492 dpif_get_dp_stats(p->dpif, &dpstats);
2493 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2494 memset(ots, 0, sizeof *ots);
2495 ots->table_id = TABLEID_HASH;
2496 strcpy(ots->name, "hash");
2497 ots->wildcards = htonl(0);
2498 ots->max_entries = htonl(dpstats.max_capacity);
2499 ots->active_count = htonl(n_exact);
2500 ots->lookup_count = htonll(dpstats.n_frags + dpstats.n_hit +
2501 dpstats.n_missed);
2502 ots->matched_count = htonll(dpstats.n_hit); /* XXX */
2503
2504 /* Classifier table. */
2505 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2506 memset(ots, 0, sizeof *ots);
2507 ots->table_id = TABLEID_CLASSIFIER;
2508 strcpy(ots->name, "classifier");
2509 ots->wildcards = p->tun_id_from_cookie ? htonl(OVSFW_ALL)
2510 : htonl(OFPFW_ALL);
2511 ots->max_entries = htonl(65536);
2512 ots->active_count = htonl(n_wild);
2513 ots->lookup_count = htonll(0); /* XXX */
2514 ots->matched_count = htonll(0); /* XXX */
2515
2516 queue_tx(msg, ofconn, ofconn->reply_counter);
2517 return 0;
2518 }
2519
2520 static void
2521 append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
2522 struct ofpbuf *msg)
2523 {
2524 struct netdev_stats stats;
2525 struct ofp_port_stats *ops;
2526
2527 /* Intentionally ignore return value, since errors will set
2528 * 'stats' to all-1s, which is correct for OpenFlow, and
2529 * netdev_get_stats() will log errors. */
2530 netdev_get_stats(port->netdev, &stats);
2531
2532 ops = append_stats_reply(sizeof *ops, ofconn, &msg);
2533 ops->port_no = htons(odp_port_to_ofp_port(port_no));
2534 memset(ops->pad, 0, sizeof ops->pad);
2535 ops->rx_packets = htonll(stats.rx_packets);
2536 ops->tx_packets = htonll(stats.tx_packets);
2537 ops->rx_bytes = htonll(stats.rx_bytes);
2538 ops->tx_bytes = htonll(stats.tx_bytes);
2539 ops->rx_dropped = htonll(stats.rx_dropped);
2540 ops->tx_dropped = htonll(stats.tx_dropped);
2541 ops->rx_errors = htonll(stats.rx_errors);
2542 ops->tx_errors = htonll(stats.tx_errors);
2543 ops->rx_frame_err = htonll(stats.rx_frame_errors);
2544 ops->rx_over_err = htonll(stats.rx_over_errors);
2545 ops->rx_crc_err = htonll(stats.rx_crc_errors);
2546 ops->collisions = htonll(stats.collisions);
2547 }
2548
2549 static int
2550 handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn,
2551 struct ofp_stats_request *osr,
2552 size_t arg_size)
2553 {
2554 struct ofp_port_stats_request *psr;
2555 struct ofp_port_stats *ops;
2556 struct ofpbuf *msg;
2557 struct ofport *port;
2558 unsigned int port_no;
2559
2560 if (arg_size != sizeof *psr) {
2561 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2562 }
2563 psr = (struct ofp_port_stats_request *) osr->body;
2564
2565 msg = start_stats_reply(osr, sizeof *ops * 16);
2566 if (psr->port_no != htons(OFPP_NONE)) {
2567 port = port_array_get(&p->ports,
2568 ofp_port_to_odp_port(ntohs(psr->port_no)));
2569 if (port) {
2570 append_port_stat(port, ntohs(psr->port_no), ofconn, msg);
2571 }
2572 } else {
2573 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
2574 append_port_stat(port, port_no, ofconn, msg);
2575 }
2576 }
2577
2578 queue_tx(msg, ofconn, ofconn->reply_counter);
2579 return 0;
2580 }
2581
2582 struct flow_stats_cbdata {
2583 struct ofproto *ofproto;
2584 struct ofconn *ofconn;
2585 uint16_t out_port;
2586 struct ofpbuf *msg;
2587 };
2588
2589 /* Obtains statistic counters for 'rule' within 'p' and stores them into
2590 * '*packet_countp' and '*byte_countp'. If 'rule' is a wildcarded rule, the
2591 * returned statistics include statistics for all of 'rule''s subrules. */
2592 static void
2593 query_stats(struct ofproto *p, struct rule *rule,
2594 uint64_t *packet_countp, uint64_t *byte_countp)
2595 {
2596 uint64_t packet_count, byte_count;
2597 struct rule *subrule;
2598 struct odp_flow *odp_flows;
2599 size_t n_odp_flows;
2600
2601 /* Start from historical data for 'rule' itself that are no longer tracked
2602 * by the datapath. This counts, for example, subrules that have
2603 * expired. */
2604 packet_count = rule->packet_count;
2605 byte_count = rule->byte_count;
2606
2607 /* Prepare to ask the datapath for statistics on 'rule', or if it is
2608 * wildcarded then on all of its subrules.
2609 *
2610 * Also, add any statistics that are not tracked by the datapath for each
2611 * subrule. This includes, for example, statistics for packets that were
2612 * executed "by hand" by ofproto via dpif_execute() but must be accounted
2613 * to a flow. */
2614 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
2615 odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
2616 if (rule->cr.wc.wildcards) {
2617 size_t i = 0;
2618 LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
2619 odp_flows[i++].key = subrule->cr.flow;
2620 packet_count += subrule->packet_count;
2621 byte_count += subrule->byte_count;
2622 }
2623 } else {
2624 odp_flows[0].key = rule->cr.flow;
2625 }
2626
2627 /* Fetch up-to-date statistics from the datapath and add them in. */
2628 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
2629 size_t i;
2630 for (i = 0; i < n_odp_flows; i++) {
2631 struct odp_flow *odp_flow = &odp_flows[i];
2632 packet_count += odp_flow->stats.n_packets;
2633 byte_count += odp_flow->stats.n_bytes;
2634 }
2635 }
2636 free(odp_flows);
2637
2638 /* Return the stats to the caller. */
2639 *packet_countp = packet_count;
2640 *byte_countp = byte_count;
2641 }
2642
2643 static void
2644 flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
2645 {
2646 struct rule *rule = rule_from_cls_rule(rule_);
2647 struct flow_stats_cbdata *cbdata = cbdata_;
2648 struct ofp_flow_stats *ofs;
2649 uint64_t packet_count, byte_count;
2650 size_t act_len, len;
2651 long long int tdiff = time_msec() - rule->created;
2652 uint32_t sec = tdiff / 1000;
2653 uint32_t msec = tdiff - (sec * 1000);
2654
2655 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2656 return;
2657 }
2658
2659 act_len = sizeof *rule->actions * rule->n_actions;
2660 len = offsetof(struct ofp_flow_stats, actions) + act_len;
2661
2662 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2663
2664 ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
2665 ofs->length = htons(len);
2666 ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
2667 ofs->pad = 0;
2668 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
2669 cbdata->ofproto->tun_id_from_cookie, &ofs->match);
2670 ofs->duration_sec = htonl(sec);
2671 ofs->duration_nsec = htonl(msec * 1000000);
2672 ofs->cookie = rule->flow_cookie;
2673 ofs->priority = htons(rule->cr.priority);
2674 ofs->idle_timeout = htons(rule->idle_timeout);
2675 ofs->hard_timeout = htons(rule->hard_timeout);
2676 memset(ofs->pad2, 0, sizeof ofs->pad2);
2677 ofs->packet_count = htonll(packet_count);
2678 ofs->byte_count = htonll(byte_count);
2679 memcpy(ofs->actions, rule->actions, act_len);
2680 }
2681
2682 static int
2683 table_id_to_include(uint8_t table_id)
2684 {
2685 return (table_id == TABLEID_HASH ? CLS_INC_EXACT
2686 : table_id == TABLEID_CLASSIFIER ? CLS_INC_WILD
2687 : table_id == 0xff ? CLS_INC_ALL
2688 : 0);
2689 }
2690
2691 static int
2692 handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn,
2693 const struct ofp_stats_request *osr,
2694 size_t arg_size)
2695 {
2696 struct ofp_flow_stats_request *fsr;
2697 struct flow_stats_cbdata cbdata;
2698 struct cls_rule target;
2699
2700 if (arg_size != sizeof *fsr) {
2701 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2702 }
2703 fsr = (struct ofp_flow_stats_request *) osr->body;
2704
2705 COVERAGE_INC(ofproto_flows_req);
2706 cbdata.ofproto = p;
2707 cbdata.ofconn = ofconn;
2708 cbdata.out_port = fsr->out_port;
2709 cbdata.msg = start_stats_reply(osr, 1024);
2710 cls_rule_from_match(&fsr->match, 0, false, 0, &target);
2711 classifier_for_each_match(&p->cls, &target,
2712 table_id_to_include(fsr->table_id),
2713 flow_stats_cb, &cbdata);
2714 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
2715 return 0;
2716 }
2717
2718 struct flow_stats_ds_cbdata {
2719 struct ofproto *ofproto;
2720 struct ds *results;
2721 };
2722
2723 static void
2724 flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
2725 {
2726 struct rule *rule = rule_from_cls_rule(rule_);
2727 struct flow_stats_ds_cbdata *cbdata = cbdata_;
2728 struct ds *results = cbdata->results;
2729 struct ofp_match match;
2730 uint64_t packet_count, byte_count;
2731 size_t act_len = sizeof *rule->actions * rule->n_actions;
2732
2733 /* Don't report on subrules. */
2734 if (rule->super != NULL) {
2735 return;
2736 }
2737
2738 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2739 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
2740 cbdata->ofproto->tun_id_from_cookie, &match);
2741
2742 ds_put_format(results, "duration=%llds, ",
2743 (time_msec() - rule->created) / 1000);
2744 ds_put_format(results, "priority=%u, ", rule->cr.priority);
2745 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
2746 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
2747 ofp_print_match(results, &match, true);
2748 ofp_print_actions(results, &rule->actions->header, act_len);
2749 ds_put_cstr(results, "\n");
2750 }
2751
2752 /* Adds a pretty-printed description of all flows to 'results', including
2753 * those marked hidden by secchan (e.g., by in-band control). */
2754 void
2755 ofproto_get_all_flows(struct ofproto *p, struct ds *results)
2756 {
2757 struct ofp_match match;
2758 struct cls_rule target;
2759 struct flow_stats_ds_cbdata cbdata;
2760
2761 memset(&match, 0, sizeof match);
2762 match.wildcards = htonl(OVSFW_ALL);
2763
2764 cbdata.ofproto = p;
2765 cbdata.results = results;
2766
2767 cls_rule_from_match(&match, 0, false, 0, &target);
2768 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2769 flow_stats_ds_cb, &cbdata);
2770 }
2771
2772 struct aggregate_stats_cbdata {
2773 struct ofproto *ofproto;
2774 uint16_t out_port;
2775 uint64_t packet_count;
2776 uint64_t byte_count;
2777 uint32_t n_flows;
2778 };
2779
2780 static void
2781 aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
2782 {
2783 struct rule *rule = rule_from_cls_rule(rule_);
2784 struct aggregate_stats_cbdata *cbdata = cbdata_;
2785 uint64_t packet_count, byte_count;
2786
2787 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2788 return;
2789 }
2790
2791 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2792
2793 cbdata->packet_count += packet_count;
2794 cbdata->byte_count += byte_count;
2795 cbdata->n_flows++;
2796 }
2797
2798 static int
2799 handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn,
2800 const struct ofp_stats_request *osr,
2801 size_t arg_size)
2802 {
2803 struct ofp_aggregate_stats_request *asr;
2804 struct ofp_aggregate_stats_reply *reply;
2805 struct aggregate_stats_cbdata cbdata;
2806 struct cls_rule target;
2807 struct ofpbuf *msg;
2808
2809 if (arg_size != sizeof *asr) {
2810 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2811 }
2812 asr = (struct ofp_aggregate_stats_request *) osr->body;
2813
2814 COVERAGE_INC(ofproto_agg_request);
2815 cbdata.ofproto = p;
2816 cbdata.out_port = asr->out_port;
2817 cbdata.packet_count = 0;
2818 cbdata.byte_count = 0;
2819 cbdata.n_flows = 0;
2820 cls_rule_from_match(&asr->match, 0, false, 0, &target);
2821 classifier_for_each_match(&p->cls, &target,
2822 table_id_to_include(asr->table_id),
2823 aggregate_stats_cb, &cbdata);
2824
2825 msg = start_stats_reply(osr, sizeof *reply);
2826 reply = append_stats_reply(sizeof *reply, ofconn, &msg);
2827 reply->flow_count = htonl(cbdata.n_flows);
2828 reply->packet_count = htonll(cbdata.packet_count);
2829 reply->byte_count = htonll(cbdata.byte_count);
2830 queue_tx(msg, ofconn, ofconn->reply_counter);
2831 return 0;
2832 }
2833
2834 static int
2835 handle_stats_request(struct ofproto *p, struct ofconn *ofconn,
2836 struct ofp_header *oh)
2837 {
2838 struct ofp_stats_request *osr;
2839 size_t arg_size;
2840 int error;
2841
2842 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
2843 1, &arg_size);
2844 if (error) {
2845 return error;
2846 }
2847 osr = (struct ofp_stats_request *) oh;
2848
2849 switch (ntohs(osr->type)) {
2850 case OFPST_DESC:
2851 return handle_desc_stats_request(p, ofconn, osr);
2852
2853 case OFPST_FLOW:
2854 return handle_flow_stats_request(p, ofconn, osr, arg_size);
2855
2856 case OFPST_AGGREGATE:
2857 return handle_aggregate_stats_request(p, ofconn, osr, arg_size);
2858
2859 case OFPST_TABLE:
2860 return handle_table_stats_request(p, ofconn, osr);
2861
2862 case OFPST_PORT:
2863 return handle_port_stats_request(p, ofconn, osr, arg_size);
2864
2865 case OFPST_VENDOR:
2866 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
2867
2868 default:
2869 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
2870 }
2871 }
2872
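/* Converts a datapath flow timestamp to milliseconds.  A zero 'sec' is
* taken to mean that the flow has never been used, so it maps to 0 rather
* than to a small nonzero value. */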
2873 static long long int
2874 msec_from_nsec(uint64_t sec, uint32_t nsec)
2875 {
2876 return !sec ? 0 : sec * 1000 + nsec / 1000000;
2877 }
2878
2879 static void
2880 update_time(struct ofproto *ofproto, struct rule *rule,
2881 const struct odp_flow_stats *stats)
2882 {
2883 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
2884 if (used > rule->used) {
2885 rule->used = used;
2886 if (rule->super && used > rule->super->used) {
2887 rule->super->used = used;
2888 }
2889 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
2890 }
2891 }
2892
2893 static void
2894 update_stats(struct ofproto *ofproto, struct rule *rule,
2895 const struct odp_flow_stats *stats)
2896 {
2897 if (stats->n_packets) {
2898 update_time(ofproto, rule, stats);
2899 rule->packet_count += stats->n_packets;
2900 rule->byte_count += stats->n_bytes;
2901 netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
2902 stats->tcp_flags);
2903 }
2904 }
2905
2906 /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
2907 * in which no matching flow already exists in the flow table.
2908 *
2909 * Adds the flow specified by 'ofm', which is followed by 'n_actions'
2910 * ofp_actions, to 'p''s flow table. Returns 0 on success or an OpenFlow error
2911 * code as encoded by ofp_mkerr() on failure.
2912 *
2913 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
2914 * if any. */
2915 static int
2916 add_flow(struct ofproto *p, struct ofconn *ofconn,
2917 const struct ofp_flow_mod *ofm, size_t n_actions)
2918 {
2919 struct ofpbuf *packet;
2920 struct rule *rule;
2921 uint16_t in_port;
2922 int error;
2923
2924 if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
2925 flow_t flow;
2926 uint32_t wildcards;
2927
2928 flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
2929 &flow, &wildcards);
2930 if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
2931 ntohs(ofm->priority))) {
2932 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
2933 }
2934 }
2935
2936 rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
2937 n_actions, ntohs(ofm->idle_timeout),
2938 ntohs(ofm->hard_timeout), ofm->cookie,
2939 ofm->flags & htons(OFPFF_SEND_FLOW_REM));
2940 cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
2941 p->tun_id_from_cookie, ofm->cookie, &rule->cr);
2942
2943 error = 0;
2944 if (ofm->buffer_id != htonl(UINT32_MAX)) {
2945 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
2946 &packet, &in_port);
2947 } else {
2948 packet = NULL;
2949 in_port = UINT16_MAX;
2950 }
2951
2952 rule_insert(p, rule, packet, in_port);
2953 ofpbuf_delete(packet);
2954 return error;
2955 }
2956
2957 static struct rule *
2958 find_flow_strict(struct ofproto *p, const struct ofp_flow_mod *ofm)
2959 {
2960 uint32_t wildcards;
2961 flow_t flow;
2962
2963 flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
2964 &flow, &wildcards);
2965 return rule_from_cls_rule(classifier_find_rule_exactly(
2966 &p->cls, &flow, wildcards,
2967 ntohs(ofm->priority)));
2968 }
2969
2970 static int
2971 send_buffered_packet(struct ofproto *ofproto, struct ofconn *ofconn,
2972 struct rule *rule, const struct ofp_flow_mod *ofm)
2973 {
2974 struct ofpbuf *packet;
2975 uint16_t in_port;
2976 flow_t flow;
2977 int error;
2978
2979 if (ofm->buffer_id == htonl(UINT32_MAX)) {
2980 return 0;
2981 }
2982
2983 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
2984 &packet, &in_port);
2985 if (error) {
2986 return error;
2987 }
2988
2989 flow_extract(packet, 0, in_port, &flow);
2990 rule_execute(ofproto, rule, packet, &flow);
2991 ofpbuf_delete(packet);
2992
2993 return 0;
2994 }
2995 \f
2996 /* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
2997
2998 struct modify_flows_cbdata {
2999 struct ofproto *ofproto;
3000 const struct ofp_flow_mod *ofm;
3001 size_t n_actions;
3002 struct rule *match;
3003 };
3004
3005 static int modify_flow(struct ofproto *, const struct ofp_flow_mod *,
3006 size_t n_actions, struct rule *);
3007 static void modify_flows_cb(struct cls_rule *, void *cbdata_);
3008
3009 /* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
3010 * encoded by ofp_mkerr() on failure.
3011 *
3012 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3013 * if any. */
3014 static int
3015 modify_flows_loose(struct ofproto *p, struct ofconn *ofconn,
3016 const struct ofp_flow_mod *ofm, size_t n_actions)
3017 {
3018 struct modify_flows_cbdata cbdata;
3019 struct cls_rule target;
3020
3021 cbdata.ofproto = p;
3022 cbdata.ofm = ofm;
3023 cbdata.n_actions = n_actions;
3024 cbdata.match = NULL;
3025
3026 cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
3027 &target);
3028
3029 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3030 modify_flows_cb, &cbdata);
3031 if (cbdata.match) {
3032 /* This credits the packet to whichever flow happened to
3033 * match last. That's weird. Maybe we should do a lookup for the
3034 * flow that actually matches the packet? Who knows. */
3035 send_buffered_packet(p, ofconn, cbdata.match, ofm);
3036 return 0;
3037 } else {
3038 return add_flow(p, ofconn, ofm, n_actions);
3039 }
3040 }
3041
3042 /* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
3043 * code as encoded by ofp_mkerr() on failure.
3044 *
3045 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3046 * if any. */
3047 static int
3048 modify_flow_strict(struct ofproto *p, struct ofconn *ofconn,
3049 struct ofp_flow_mod *ofm, size_t n_actions)
3050 {
3051 struct rule *rule = find_flow_strict(p, ofm);
3052 if (rule && !rule_is_hidden(rule)) {
3053 modify_flow(p, ofm, n_actions, rule);
3054 return send_buffered_packet(p, ofconn, rule, ofm);
3055 } else {
3056 return add_flow(p, ofconn, ofm, n_actions);
3057 }
3058 }
3059
3060 /* Callback for modify_flows_loose(). */
3061 static void
3062 modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
3063 {
3064 struct rule *rule = rule_from_cls_rule(rule_);
3065 struct modify_flows_cbdata *cbdata = cbdata_;
3066
3067 if (!rule_is_hidden(rule)) {
3068 cbdata->match = rule;
3069 modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions, rule);
3070 }
3071 }
3072
3073 /* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
3074 * been identified as a flow in 'p''s flow table to be modified, by changing
3075 * the rule's actions to match those in 'ofm' (which is followed by 'n_actions'
3076 * ofp_action[] structures). */
3077 static int
3078 modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
3079 size_t n_actions, struct rule *rule)
3080 {
3081 size_t actions_len = n_actions * sizeof *rule->actions;
3082
3083 rule->flow_cookie = ofm->cookie;
3084
3085 /* If the actions are the same, do nothing. */
3086 if (n_actions == rule->n_actions
3087 && !memcmp(ofm->actions, rule->actions, actions_len))
3088 {
3089 return 0;
3090 }
3091
3092 /* Replace actions. */
3093 free(rule->actions);
3094 rule->actions = xmemdup(ofm->actions, actions_len);
3095 rule->n_actions = n_actions;
3096
3097 /* Make sure that the datapath gets updated properly. */
3098 if (rule->cr.wc.wildcards) {
3099 COVERAGE_INC(ofproto_mod_wc_flow);
3100 p->need_revalidate = true;
3101 } else {
3102 rule_update_actions(p, rule);
3103 }
3104
3105 return 0;
3106 }
3107 \f
3108 /* OFPFC_DELETE implementation. */
3109
3110 struct delete_flows_cbdata {
3111 struct ofproto *ofproto;
3112 uint16_t out_port;
3113 };
3114
3115 static void delete_flows_cb(struct cls_rule *, void *cbdata_);
3116 static void delete_flow(struct ofproto *, struct rule *, uint16_t out_port);
3117
3118 /* Implements OFPFC_DELETE. */
3119 static void
3120 delete_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm)
3121 {
3122 struct delete_flows_cbdata cbdata;
3123 struct cls_rule target;
3124
3125 cbdata.ofproto = p;
3126 cbdata.out_port = ofm->out_port;
3127
3128 cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
3129 &target);
3130
3131 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3132 delete_flows_cb, &cbdata);
3133 }
3134
3135 /* Implements OFPFC_DELETE_STRICT. */
3136 static void
3137 delete_flow_strict(struct ofproto *p, struct ofp_flow_mod *ofm)
3138 {
3139 struct rule *rule = find_flow_strict(p, ofm);
3140 if (rule) {
3141 delete_flow(p, rule, ofm->out_port);
3142 }
3143 }
3144
3145 /* Callback for delete_flows_loose(). */
3146 static void
3147 delete_flows_cb(struct cls_rule *rule_, void *cbdata_)
3148 {
3149 struct rule *rule = rule_from_cls_rule(rule_);
3150 struct delete_flows_cbdata *cbdata = cbdata_;
3151
3152 delete_flow(cbdata->ofproto, rule, cbdata->out_port);
3153 }
3154
3155 /* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
3156 * been identified as a flow to delete from 'p''s flow table, by deleting the
3157 * flow and sending out an OFPT_FLOW_REMOVED message to any interested
3158 * controller.
3159 *
3160 * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
3161 * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
3162 * specified 'out_port'. */
3163 static void
3164 delete_flow(struct ofproto *p, struct rule *rule, uint16_t out_port)
3165 {
3166 if (rule_is_hidden(rule)) {
3167 return;
3168 }
3169
3170 if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
3171 return;
3172 }
3173
3174 send_flow_removed(p, rule, time_msec(), OFPRR_DELETE);
3175 rule_remove(p, rule);
3176 }
3177 \f
3178 static int
3179 handle_flow_mod(struct ofproto *p, struct ofconn *ofconn,
3180 struct ofp_flow_mod *ofm)
3181 {
3182 size_t n_actions;
3183 int error;
3184
3185 error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm,
3186 sizeof *ofm->actions, &n_actions);
3187 if (error) {
3188 return error;
3189 }
3190
3191 /* We do not support the emergency flow cache. It will hopefully
3192 * get dropped from OpenFlow in the near future. */
3193 if (ofm->flags & htons(OFPFF_EMERG)) {
3194 /* There isn't a good fit for an error code, so just state that the
3195 * flow table is full. */
3196 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
3197 }
3198
3199 normalize_match(&ofm->match);
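/* Priority is not meaningful for an exact-match flow, so force it to the
* maximum to keep exact matches ahead of any wildcarded flow. */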
3200 if (!ofm->match.wildcards) {
3201 ofm->priority = htons(UINT16_MAX);
3202 }
3203
3204 error = validate_actions((const union ofp_action *) ofm->actions,
3205 n_actions, p->max_ports);
3206 if (error) {
3207 return error;
3208 }
3209
3210 switch (ntohs(ofm->command)) {
3211 case OFPFC_ADD:
3212 return add_flow(p, ofconn, ofm, n_actions);
3213
3214 case OFPFC_MODIFY:
3215 return modify_flows_loose(p, ofconn, ofm, n_actions);
3216
3217 case OFPFC_MODIFY_STRICT:
3218 return modify_flow_strict(p, ofconn, ofm, n_actions);
3219
3220 case OFPFC_DELETE:
3221 delete_flows_loose(p, ofm);
3222 return 0;
3223
3224 case OFPFC_DELETE_STRICT:
3225 delete_flow_strict(p, ofm);
3226 return 0;
3227
3228 default:
3229 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
3230 }
3231 }
3232
3233 static int
3234 handle_tun_id_from_cookie(struct ofproto *p, struct nxt_tun_id_cookie *msg)
3235 {
3236 int error;
3237
3238 error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
3239 if (error) {
3240 return error;
3241 }
3242
3243 p->tun_id_from_cookie = !!msg->set;
3244 return 0;
3245 }
3246
3247 static int
3248 handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
3249 {
3250 struct ofp_vendor_header *ovh = msg;
3251 struct nicira_header *nh;
3252
3253 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
3254 VLOG_WARN_RL(&rl, "received vendor message of length %u "
3255 "(expected at least %zu)",
3256 ntohs(ovh->header.length), sizeof(struct ofp_vendor_header));
3257 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3258 }
3259 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
3260 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3261 }
3262 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
3263 VLOG_WARN_RL(&rl, "received Nicira vendor message of length %u "
3264 "(expected at least %zu)",
3265 ntohs(ovh->header.length), sizeof(struct nicira_header));
3266 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3267 }
3268
3269 nh = msg;
3270 switch (ntohl(nh->subtype)) {
3271 case NXT_STATUS_REQUEST:
3272 return switch_status_handle_request(p->switch_status, ofconn->rconn,
3273 msg);
3274
3275 case NXT_TUN_ID_FROM_COOKIE:
3276 return handle_tun_id_from_cookie(p, msg);
3277 }
3278
3279 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3280 }
3281
3282 static int
3283 handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh)
3284 {
3285 struct ofp_header *ob;
3286 struct ofpbuf *buf;
3287
3288 /* Currently, everything executes synchronously, so we can just
3289 * immediately send the barrier reply. */
3290 ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
3291 queue_tx(buf, ofconn, ofconn->reply_counter);
3292 return 0;
3293 }
3294
3295 static void
3296 handle_openflow(struct ofconn *ofconn, struct ofproto *p,
3297 struct ofpbuf *ofp_msg)
3298 {
3299 struct ofp_header *oh = ofp_msg->data;
3300 int error;
3301
3302 COVERAGE_INC(ofproto_recv_openflow);
3303 switch (oh->type) {
3304 case OFPT_ECHO_REQUEST:
3305 error = handle_echo_request(ofconn, oh);
3306 break;
3307
3308 case OFPT_ECHO_REPLY:
3309 error = 0;
3310 break;
3311
3312 case OFPT_FEATURES_REQUEST:
3313 error = handle_features_request(p, ofconn, oh);
3314 break;
3315
3316 case OFPT_GET_CONFIG_REQUEST:
3317 error = handle_get_config_request(p, ofconn, oh);
3318 break;
3319
3320 case OFPT_SET_CONFIG:
3321 error = handle_set_config(p, ofconn, ofp_msg->data);
3322 break;
3323
3324 case OFPT_PACKET_OUT:
3325 error = handle_packet_out(p, ofconn, ofp_msg->data);
3326 break;
3327
3328 case OFPT_PORT_MOD:
3329 error = handle_port_mod(p, oh);
3330 break;
3331
3332 case OFPT_FLOW_MOD:
3333 error = handle_flow_mod(p, ofconn, ofp_msg->data);
3334 break;
3335
3336 case OFPT_STATS_REQUEST:
3337 error = handle_stats_request(p, ofconn, oh);
3338 break;
3339
3340 case OFPT_VENDOR:
3341 error = handle_vendor(p, ofconn, ofp_msg->data);
3342 break;
3343
3344 case OFPT_BARRIER_REQUEST:
3345 error = handle_barrier_request(ofconn, oh);
3346 break;
3347
3348 default:
3349 if (VLOG_IS_WARN_ENABLED()) {
3350 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
3351 VLOG_WARN_RL(&rl, "OpenFlow message ignored: %s", s);
3352 free(s);
3353 }
3354 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
3355 break;
3356 }
3357
3358 if (error) {
3359 send_error_oh(ofconn, ofp_msg->data, error);
3360 }
3361 }
3362 \f
3363 static void
3364 handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
3365 {
3366 struct odp_msg *msg = packet->data;
3367 uint16_t in_port = odp_port_to_ofp_port(msg->port);
3368 struct rule *rule;
3369 struct ofpbuf payload;
3370 flow_t flow;
3371
3372 payload.data = msg + 1;
3373 payload.size = msg->length - sizeof *msg;
3374 flow_extract(&payload, msg->arg, msg->port, &flow);
3375
3376 /* Check with in-band control to see if this packet should be sent
3377 * to the local port regardless of the flow table. */
3378 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
3379 union odp_action action;
3380
3381 memset(&action, 0, sizeof(action));
3382 action.output.type = ODPAT_OUTPUT;
3383 action.output.port = ODPP_LOCAL;
3384 dpif_execute(p->dpif, flow.in_port, &action, 1, &payload);
3385 }
3386
3387 rule = lookup_valid_rule(p, &flow);
3388 if (!rule) {
3389 /* Don't send a packet-in if OFPPC_NO_PACKET_IN is asserted. */
3390 struct ofport *port = port_array_get(&p->ports, msg->port);
3391 if (port) {
3392 if (port->opp.config & OFPPC_NO_PACKET_IN) {
3393 COVERAGE_INC(ofproto_no_packet_in);
3394 /* XXX install 'drop' flow entry */
3395 ofpbuf_delete(packet);
3396 return;
3397 }
3398 } else {
3399 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
3400 }
3401
3402 COVERAGE_INC(ofproto_packet_in);
3403 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3404 return;
3405 }
3406
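/* A wildcarded rule cannot be installed in the exact-match datapath, so
* create an exact-match subrule for this flow and compose its actions. */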
3407 if (rule->cr.wc.wildcards) {
3408 rule = rule_create_subrule(p, rule, &flow);
3409 rule_make_actions(p, rule, packet);
3410 } else {
3411 if (!rule->may_install) {
3412 /* The rule is not installable, that is, we need to process every
3413 * packet, so process the current packet and set its actions into
3414 * 'rule' itself. */
3415 rule_make_actions(p, rule, packet);
3416 } else {
3417 /* XXX revalidate rule if it needs it */
3418 }
3419 }
3420
3421 rule_execute(p, rule, &payload, &flow);
3422 rule_reinstall(p, rule);
3423
3424 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY
3425 && rconn_is_connected(p->controller->rconn)) {
3426 /*
3427 * Extra-special case for fail-open mode.
3428 *
3429 * We are in fail-open mode and the packet matched the fail-open rule,
3430 * but we are connected to a controller too. We should send the packet
3431 * up to the controller in the hope that it will try to set up a flow
3432 * and thereby allow us to exit fail-open.
3433 *
3434 * See the top-level comment in fail-open.c for more information.
3435 */
3436 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3437 } else {
3438 ofpbuf_delete(packet);
3439 }
3440 }
3441
3442 static void
3443 handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
3444 {
3445 struct odp_msg *msg = packet->data;
3446
3447 switch (msg->type) {
3448 case _ODPL_ACTION_NR:
3449 COVERAGE_INC(ofproto_ctlr_action);
3450 pinsched_send(p->action_sched, odp_port_to_ofp_port(msg->port), packet,
3451 send_packet_in_action, p);
3452 break;
3453
3454 case _ODPL_SFLOW_NR:
3455 if (p->sflow) {
3456 ofproto_sflow_received(p->sflow, msg);
3457 }
3458 ofpbuf_delete(packet);
3459 break;
3460
3461 case _ODPL_MISS_NR:
3462 handle_odp_miss_msg(p, packet);
3463 break;
3464
3465 default:
3466 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
3467 msg->type);
3468 break;
3469 }
3470 }
3471 \f
3472 static void
3473 revalidate_cb(struct cls_rule *sub_, void *cbdata_)
3474 {
3475 struct rule *sub = rule_from_cls_rule(sub_);
3476 struct revalidate_cbdata *cbdata = cbdata_;
3477
3478 if (cbdata->revalidate_all
3479 || (cbdata->revalidate_subrules && sub->super)
3480 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
3481 revalidate_rule(cbdata->ofproto, sub);
3482 }
3483 }
3484
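/* Revalidates 'rule'.  If it is a subrule whose super-rule no longer
* exists, removes it and returns false.  If a different super-rule now
* matches, re-parents the subrule and refreshes its timeouts.  In any other
* case, updates the rule's datapath actions and returns true. */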
3485 static bool
3486 revalidate_rule(struct ofproto *p, struct rule *rule)
3487 {
3488 const flow_t *flow = &rule->cr.flow;
3489
3490 COVERAGE_INC(ofproto_revalidate_rule);
3491 if (rule->super) {
3492 struct rule *super;
3493 super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
3494 if (!super) {
3495 rule_remove(p, rule);
3496 return false;
3497 } else if (super != rule->super) {
3498 COVERAGE_INC(ofproto_revalidate_moved);
3499 list_remove(&rule->list);
3500 list_push_back(&super->list, &rule->list);
3501 rule->super = super;
3502 rule->hard_timeout = super->hard_timeout;
3503 rule->idle_timeout = super->idle_timeout;
3504 rule->created = super->created;
3505 rule->used = 0;
3506 }
3507 }
3508
3509 rule_update_actions(p, rule);
3510 return true;
3511 }
3512
3513 static struct ofpbuf *
3514 compose_flow_removed(struct ofproto *p, const struct rule *rule,
3515 long long int now, uint8_t reason)
3516 {
3517 struct ofp_flow_removed *ofr;
3518 struct ofpbuf *buf;
3519 long long int tdiff = now - rule->created;
3520 uint32_t sec = tdiff / 1000;
3521 uint32_t msec = tdiff - (sec * 1000);
3522
3523 ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
3524 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->tun_id_from_cookie,
3525 &ofr->match);
3526 ofr->cookie = rule->flow_cookie;
3527 ofr->priority = htons(rule->cr.priority);
3528 ofr->reason = reason;
3529 ofr->duration_sec = htonl(sec);
3530 ofr->duration_nsec = htonl(msec * 1000000);
3531 ofr->idle_timeout = htons(rule->idle_timeout);
3532 ofr->packet_count = htonll(rule->packet_count);
3533 ofr->byte_count = htonll(rule->byte_count);
3534
3535 return buf;
3536 }
3537
3538 static void
3539 uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
3540 {
3541 assert(rule->installed);
3542 assert(!rule->cr.wc.wildcards);
3543
3544 if (rule->super) {
3545 rule_remove(ofproto, rule);
3546 } else {
3547 rule_uninstall(ofproto, rule);
3548 }
3549 }

3550 static void
3551 send_flow_removed(struct ofproto *p, struct rule *rule,
3552 long long int now, uint8_t reason)
3553 {
3554 struct ofconn *ofconn;
3555 struct ofconn *prev;
3556 struct ofpbuf *buf = NULL;
3557
3558 /* We limit the maximum number of queued flow expirations by accounting
3559 * them under the counter for replies. That works because preventing
3560 * OpenFlow requests from being processed also prevents new flows from
3561 * being added (and expiring). (It also prevents processing OpenFlow
3562 * requests that would not add new flows, so it is imperfect.) */
3563
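/* Compose the message lazily, at most once, and send a clone to every
* interested connection except the last, which receives the original; this
* avoids an extra copy when only one controller is listening. */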
3564 prev = NULL;
3565 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3566 if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)) {
3567 if (prev) {
3568 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
3569 } else {
3570 buf = compose_flow_removed(p, rule, now, reason);
3571 }
3572 prev = ofconn;
3573 }
3574 }
3575 if (prev) {
3576 queue_tx(buf, prev, prev->reply_counter);
3577 }
3578 }
3580
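/* Callback for the periodic expiration sweep.  Removes 'rule' if its hard
* or idle timeout has passed; otherwise takes the opportunity to uninstall
* exact-match flows that the datapath has not used for 5 seconds and to
* check NetFlow active timeouts. */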
3581 static void
3582 expire_rule(struct cls_rule *cls_rule, void *p_)
3583 {
3584 struct ofproto *p = p_;
3585 struct rule *rule = rule_from_cls_rule(cls_rule);
3586 long long int hard_expire, idle_expire, expire, now;
3587
3588 hard_expire = (rule->hard_timeout
3589 ? rule->created + rule->hard_timeout * 1000
3590 : LLONG_MAX);
3591 idle_expire = (rule->idle_timeout
3592 && (rule->super || list_is_empty(&rule->list))
3593 ? rule->used + rule->idle_timeout * 1000
3594 : LLONG_MAX);
3595 expire = MIN(hard_expire, idle_expire);
3596
3597 now = time_msec();
3598 if (now < expire) {
3599 if (rule->installed && now >= rule->used + 5000) {
3600 uninstall_idle_flow(p, rule);
3601 } else if (!rule->cr.wc.wildcards) {
3602 active_timeout(p, rule);
3603 }
3604
3605 return;
3606 }
3607
3608 COVERAGE_INC(ofproto_expired);
3609
3610 /* Update stats. This code will be a no-op if the rule expired
3611 * due to an idle timeout. */
3612 if (rule->cr.wc.wildcards) {
3613 struct rule *subrule, *next;
3614 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
3615 rule_remove(p, subrule);
3616 }
3617 } else {
3618 rule_uninstall(p, rule);
3619 }
3620
3621 if (!rule_is_hidden(rule)) {
3622 send_flow_removed(p, rule, now,
3623 (now >= hard_expire
3624 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
3625 }
3626 rule_remove(p, rule);
3627 }
3628
3629 static void
3630 active_timeout(struct ofproto *ofproto, struct rule *rule)
3631 {
3632 if (ofproto->netflow && !is_controller_rule(rule) &&
3633 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
3634 struct ofexpired expired;
3635 struct odp_flow odp_flow;
3636
3637 /* Get updated flow stats. */
3638 memset(&odp_flow, 0, sizeof odp_flow);
3639 if (rule->installed) {
3640 odp_flow.key = rule->cr.flow;
3641 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
3642 dpif_flow_get(ofproto->dpif, &odp_flow);
3643
3644 if (odp_flow.stats.n_packets) {
3645 update_time(ofproto, rule, &odp_flow.stats);
3646 netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
3647 odp_flow.stats.tcp_flags);
3648 }
3649 }
3650
3651 expired.flow = rule->cr.flow;
3652 expired.packet_count = rule->packet_count +
3653 odp_flow.stats.n_packets;
3654 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
3655 expired.used = rule->used;
3656
3657 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
3658
3659 /* Schedule us to send the accumulated records once we have
3660 * collected all of them. */
3661 poll_immediate_wake();
3662 }
3663 }
3664
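/* Refreshes the 'used' time and byte accounting of every rule that is
* installed in the datapath, and deletes any datapath flow that no longer
* corresponds to an installed rule. */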
3665 static void
3666 update_used(struct ofproto *p)
3667 {
3668 struct odp_flow *flows;
3669 size_t n_flows;
3670 size_t i;
3671 int error;
3672
3673 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
3674 if (error) {
3675 return;
3676 }
3677
3678 for (i = 0; i < n_flows; i++) {
3679 struct odp_flow *f = &flows[i];
3680 struct rule *rule;
3681
3682 rule = rule_from_cls_rule(
3683 classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
3684 if (!rule || !rule->installed) {
3685 COVERAGE_INC(ofproto_unexpected_rule);
3686 dpif_flow_del(p->dpif, f);
3687 continue;
3688 }
3689
3690 update_time(p, rule, &f->stats);
3691 rule_account(p, rule, f->stats.n_bytes);
3692 }
3693 free(flows);
3694 }
3695
3696 static void
3697 do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id,
3698 const struct ofpbuf *packet, int send_len)
3699 {
3700 struct odp_msg *msg = packet->data;
3701 struct ofpbuf payload;
3702 struct ofpbuf *opi;
3703 uint8_t reason;
3704
3705 /* Extract packet payload from 'msg'. */
3706 payload.data = msg + 1;
3707 payload.size = msg->length - sizeof *msg;
3708
3709 /* Construct ofp_packet_in message. */
3710 reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
3711 opi = make_packet_in(buffer_id, odp_port_to_ofp_port(msg->port), reason,
3712 &payload, send_len);
3713
3714 /* Send. */
3715 rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100);
3716 }
3717
3718 static void
3719 send_packet_in_action(struct ofpbuf *packet, void *p_)
3720 {
3721 struct ofproto *p = p_;
3722 struct ofconn *ofconn;
3723 struct odp_msg *msg;
3724
3725 msg = packet->data;
3726 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3727 if (ofconn == p->controller || ofconn->miss_send_len) {
3728 do_send_packet_in(ofconn, UINT32_MAX, packet, msg->arg);
3729 }
3730 }
3731 ofpbuf_delete(packet);
3732 }
3733
3734 static void
3735 send_packet_in_miss(struct ofpbuf *packet, void *p_)
3736 {
3737 struct ofproto *p = p_;
3738 bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open);
3739 struct ofconn *ofconn;
3740 struct ofpbuf payload;
3741 struct odp_msg *msg;
3742
3743 msg = packet->data;
3744 payload.data = msg + 1;
3745 payload.size = msg->length - sizeof *msg;
3746 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3747 if (ofconn->miss_send_len) {
3748 struct pktbuf *pb = ofconn->pktbuf;
3749 uint32_t buffer_id = (in_fail_open
3750 ? pktbuf_get_null()
3751 : pktbuf_save(pb, &payload, msg->port));
3752 int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len
3753 : INT_MAX);
3754 do_send_packet_in(ofconn, buffer_id, packet, send_len);
3755 }
3756 }
3757 ofpbuf_delete(packet);
3758 }
3759
3760 static uint64_t
3761 pick_datapath_id(const struct ofproto *ofproto)
3762 {
3763 const struct ofport *port;
3764
3765 port = port_array_get(&ofproto->ports, ODPP_LOCAL);
3766 if (port) {
3767 uint8_t ea[ETH_ADDR_LEN];
3768 int error;
3769
3770 error = netdev_get_etheraddr(port->netdev, ea);
3771 if (!error) {
3772 return eth_addr_to_uint64(ea);
3773 }
3774 VLOG_WARN("could not get MAC address for %s (%s)",
3775 netdev_get_name(port->netdev), strerror(error));
3776 }
3777 return ofproto->fallback_dpid;
3778 }
3779
3780 static uint64_t
3781 pick_fallback_dpid(void)
3782 {
3783 uint8_t ea[ETH_ADDR_LEN];
3784 eth_addr_nicira_random(ea);
3785 return eth_addr_to_uint64(ea);
3786 }
3787 \f
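/* Default "normal action" hook: a simple MAC-learning L2 switch.  It drops
* frames for reserved multicast addresses, learns source MACs (except when
* revalidating without a packet), and outputs to the learned port, flooding
* when the destination is unknown and never forwarding a packet back out
* its input port. */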
3788 static bool
3789 default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
3790 struct odp_actions *actions, tag_type *tags,
3791 uint16_t *nf_output_iface, void *ofproto_)
3792 {
3793 struct ofproto *ofproto = ofproto_;
3794 int out_port;
3795
3796 /* Drop frames for reserved multicast addresses. */
3797 if (eth_addr_is_reserved(flow->dl_dst)) {
3798 return true;
3799 }
3800
3801 /* Learn source MAC (but don't try to learn from revalidation). */
3802 if (packet != NULL) {
3803 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
3804 0, flow->in_port);
3805 if (rev_tag) {
3806 /* The log messages here could actually be useful in debugging,
3807 * so keep the rate limit relatively high. */
3808 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
3809 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
3810 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
3811 ofproto_revalidate(ofproto, rev_tag);
3812 }
3813 }
3814
3815 /* Determine output port. */
3816 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
3817 if (out_port < 0) {
3818 add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
3819 } else if (out_port != flow->in_port) {
3820 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
3821 *nf_output_iface = out_port;
3822 } else {
3823 /* Drop. */
3824 }
3825
3826 return true;
3827 }
3828
3829 static const struct ofhooks default_ofhooks = {
3830 NULL,
3831 default_normal_ofhook_cb,
3832 NULL,
3833 NULL
3834 };