/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "ofproto.h"
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdlib.h>
#include "classifier.h"
#include "coverage.h"
#include "discovery.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "in-band.h"
#include "mac-learning.h"
#include "netdev.h"
#include "netflow.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "packets.h"
#include "pinsched.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "port-array.h"
#include "rconn.h"
#include "shash.h"
#include "status.h"
#include "stp.h"
#include "stream-ssl.h"
#include "svec.h"
#include "tag.h"
#include "timeval.h"
#include "unixctl.h"
#include "vconn.h"
#include "xtoxll.h"

#define THIS_MODULE VLM_ofproto
#include "vlog.h"

#include "sflow_api.h"

enum {
    TABLEID_HASH = 0,
    TABLEID_CLASSIFIER = 1
};

struct ofport {
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const flow_t *flow, struct ofproto *ofproto,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);

struct rule {
    struct cls_rule cr;

    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    bool send_flow_removed;     /* Send a flow removed message? */
    long long int used;         /* Last-used time (0 if never used). */
    long long int created;      /* Creation time. */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */
    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
    tag_type tags;              /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     * super-rule's list.
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct rule *super;
    struct list list;

    /* OpenFlow actions.
     *
     * A subrule has no actions (it uses the super-rule's actions). */
    int n_actions;
    union ofp_action *actions;

    /* Datapath actions.
     *
     * A super-rule with wildcard fields never has ODP actions (since the
     * datapath only supports exact-match flows). */
    bool installed;             /* Installed in datapath? */
    bool may_install;           /* True ordinarily; false if actions must
                                 * be reassessed for every packet. */
    int n_odp_actions;
    union odp_action *odp_actions;
};

static inline bool
rule_is_hidden(const struct rule *rule)
{
    /* Subrules are merely an implementation detail, so hide them from the
     * controller. */
    if (rule->super != NULL) {
        return true;
    }

    /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
     * (e.g. by in-band control) and are intentionally hidden from the
     * controller. */
    if (rule->cr.priority > UINT16_MAX) {
        return true;
    }

    return false;
}

static struct rule *rule_create(struct ofproto *, struct rule *super,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                bool send_flow_removed);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void rule_insert(struct ofproto *, struct rule *,
                        struct ofpbuf *packet, uint16_t in_port);
static void rule_remove(struct ofproto *, struct rule *);
static bool rule_make_actions(struct ofproto *, struct rule *,
                              const struct ofpbuf *packet);
static void rule_install(struct ofproto *, struct rule *,
                         struct rule *displaced_rule);
static void rule_uninstall(struct ofproto *, struct rule *);
static void rule_post_uninstall(struct ofproto *, struct rule *);
static void send_flow_removed(struct ofproto *p, struct rule *rule,
                              long long int now, uint8_t reason);

struct ofconn {
    struct list node;
    struct rconn *rconn;
    struct pktbuf *pktbuf;
    int miss_send_len;

    struct rconn_packet_counter *packet_in_counter;

    /* Number of OpenFlow messages queued as replies to OpenFlow requests, and
     * the maximum number before we stop reading OpenFlow requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;
};

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *, struct ofproto *);
static void ofconn_wait(struct ofconn *);
static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *manufacturer;         /* Manufacturer. */
    char *hardware;             /* Hardware. */
    char *software;             /* Software version. */
    char *serial;               /* Serial number. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct port_array ports;    /* Index is ODP port nr; ofport->opp.port_no is
                                 * OFP port nr. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct status_category *ss_cat;
    struct in_band *in_band;
    struct discovery *discovery;
    struct fail_open *fail_open;
    struct pinsched *miss_sched, *action_sched;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* Flow table. */
    struct classifier cls;
    bool need_revalidate;
    long long int next_expiration;
    struct tag_set revalidate_set;

    /* OpenFlow connections. */
    struct list all_conns;
    struct ofconn *controller;
    struct pvconn **listeners;
    size_t n_listeners;
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);
static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
static void send_packet_in_action(struct ofpbuf *, void *ofproto);
static void update_used(struct ofproto *);
static void update_stats(struct ofproto *, struct rule *,
                         const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofproto *,
                            struct ofpbuf *);

static void refresh_port_groups(struct ofproto *);

static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

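/* Opens the datapath named 'datapath' (of type 'datapath_type'), flushes its
 * flows and queued messages, and creates a new ofproto that uses 'ofhooks'
 * and 'aux' (or the default hooks if 'ofhooks' is null).  On success stores
 * the new ofproto in '*ofprotop' and returns 0; on failure returns a positive
 * errno value and leaves '*ofprotop' null. */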
int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->manufacturer = xstrdup("Nicira Networks, Inc.");
    p->hardware = xstrdup("Reference Implementation");
    p->software = xstrdup(VERSION BUILDNR);
    p->serial = xstrdup("None");

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    port_array_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);
    p->in_band = NULL;
    p->discovery = NULL;
    p->fail_open = NULL;
    p->miss_sched = p->action_sched = NULL;
    p->netflow = NULL;
    p->sflow = NULL;

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->need_revalidate = false;
    p->next_expiration = time_msec() + 1000;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    p->controller = ofconn_create(p, rconn_create(5, 8));
    p->controller->pktbuf = pktbuf_create();
    p->controller->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
    p->listeners = NULL;
    p->n_listeners = 0;
    p->snoops = NULL;
    p->n_snoops = 0;

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Register switch status category. */
    p->ss_cat = switch_status_register(p->switch_status, "remote",
                                       rconn_status_cb, p->controller->rconn);

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %012"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}

void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        VLOG_INFO("datapath ID changed to %012"PRIx64, p->datapath_id);
        rconn_reconnect(p->controller->rconn);
    }
}

void
ofproto_set_probe_interval(struct ofproto *p, int probe_interval)
{
    probe_interval = probe_interval ? MAX(probe_interval, 5) : 0;
    rconn_set_probe_interval(p->controller->rconn, probe_interval);
    if (p->fail_open) {
        int trigger_duration = probe_interval ? probe_interval * 3 : 15;
        fail_open_set_trigger_duration(p->fail_open, trigger_duration);
    }
}

void
ofproto_set_max_backoff(struct ofproto *p, int max_backoff)
{
    rconn_set_max_backoff(p->controller->rconn, max_backoff);
}

void
ofproto_set_desc(struct ofproto *p,
                 const char *manufacturer, const char *hardware,
                 const char *software, const char *serial)
{
    if (manufacturer) {
        free(p->manufacturer);
        p->manufacturer = xstrdup(manufacturer);
    }
    if (hardware) {
        free(p->hardware);
        p->hardware = xstrdup(hardware);
    }
    if (software) {
        free(p->software);
        p->software = xstrdup(software);
    }
    if (serial) {
        free(p->serial);
        p->serial = xstrdup(serial);
    }
}

int
ofproto_set_in_band(struct ofproto *p, bool in_band)
{
    if (in_band != (p->in_band != NULL)) {
        if (in_band) {
            return in_band_create(p, p->dpif, p->switch_status,
                                  p->controller->rconn, &p->in_band);
        } else {
            ofproto_set_discovery(p, false, NULL, true);
            in_band_destroy(p->in_band);
            p->in_band = NULL;
        }
        rconn_reconnect(p->controller->rconn);
    }
    return 0;
}

int
ofproto_set_discovery(struct ofproto *p, bool discovery,
                      const char *re, bool update_resolv_conf)
{
    if (discovery != (p->discovery != NULL)) {
        if (discovery) {
            int error = ofproto_set_in_band(p, true);
            if (error) {
                return error;
            }
            error = discovery_create(re, update_resolv_conf,
                                     p->dpif, p->switch_status,
                                     &p->discovery);
            if (error) {
                return error;
            }
        } else {
            discovery_destroy(p->discovery);
            p->discovery = NULL;
        }
        rconn_disconnect(p->controller->rconn);
    } else if (discovery) {
        discovery_set_update_resolv_conf(p->discovery, update_resolv_conf);
        return discovery_set_accept_controller_re(p->discovery, re);
    }
    return 0;
}

int
ofproto_set_controller(struct ofproto *ofproto, const char *controller)
{
    if (ofproto->discovery) {
        return EINVAL;
    } else if (controller) {
        if (strcmp(rconn_get_name(ofproto->controller->rconn), controller)) {
            return rconn_connect(ofproto->controller->rconn, controller);
        } else {
            return 0;
        }
    } else {
        rconn_disconnect(ofproto->controller->rconn);
        return 0;
    }
}

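/* Closes the passive connections in '*pvconnsp' and replaces them with one
 * pvconn for each name in 'svec'.  Returns 0 if every listener was opened,
 * otherwise the first error encountered; listeners that did open are kept
 * either way. */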
static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
            const struct svec *svec)
{
    struct pvconn **pvconns = *pvconnsp;
    size_t n_pvconns = *n_pvconnsp;
    int retval = 0;
    size_t i;

    for (i = 0; i < n_pvconns; i++) {
        pvconn_close(pvconns[i]);
    }
    free(pvconns);

    pvconns = xmalloc(svec->n * sizeof *pvconns);
    n_pvconns = 0;
    for (i = 0; i < svec->n; i++) {
        const char *name = svec->names[i];
        struct pvconn *pvconn;
        int error;

        error = pvconn_open(name, &pvconn);
        if (!error) {
            pvconns[n_pvconns++] = pvconn;
        } else {
            VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
            if (!retval) {
                retval = error;
            }
        }
    }

    *pvconnsp = pvconns;
    *n_pvconnsp = n_pvconns;

    return retval;
}

int
ofproto_set_listeners(struct ofproto *ofproto, const struct svec *listeners)
{
    return set_pvconns(&ofproto->listeners, &ofproto->n_listeners, listeners);
}

int
ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
{
    return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
}

int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options && nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;
            unsigned int odp_port;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            refresh_port_groups(ofproto);
            PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
                ofproto_sflow_add_port(os, odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

void
ofproto_set_failure(struct ofproto *ofproto, bool fail_open)
{
    if (fail_open) {
        struct rconn *rconn = ofproto->controller->rconn;
        int trigger_duration = rconn_get_probe_interval(rconn) * 3;
        if (!ofproto->fail_open) {
            ofproto->fail_open = fail_open_create(ofproto, trigger_duration,
                                                  ofproto->switch_status,
                                                  rconn);
        } else {
            fail_open_set_trigger_duration(ofproto->fail_open,
                                           trigger_duration);
        }
    } else {
        fail_open_destroy(ofproto->fail_open);
        ofproto->fail_open = NULL;
    }
}

void
ofproto_set_rate_limit(struct ofproto *ofproto,
                       int rate_limit, int burst_limit)
{
    if (rate_limit > 0) {
        if (!ofproto->miss_sched) {
            ofproto->miss_sched = pinsched_create(rate_limit, burst_limit,
                                                  ofproto->switch_status);
            ofproto->action_sched = pinsched_create(rate_limit, burst_limit,
                                                    NULL);
        } else {
            pinsched_set_limits(ofproto->miss_sched, rate_limit, burst_limit);
            pinsched_set_limits(ofproto->action_sched,
                                rate_limit, burst_limit);
        }
    } else {
        pinsched_destroy(ofproto->miss_sched);
        ofproto->miss_sched = NULL;
        pinsched_destroy(ofproto->action_sched);
        ofproto->action_sched = NULL;
    }
}

int
ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
    /* XXX */
    if (enable_stp) {
        VLOG_WARN("STP is not yet implemented");
        return EINVAL;
    } else {
        return 0;
    }
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

int
ofproto_get_probe_interval(const struct ofproto *ofproto)
{
    return rconn_get_probe_interval(ofproto->controller->rconn);
}

int
ofproto_get_max_backoff(const struct ofproto *ofproto)
{
    return rconn_get_max_backoff(ofproto->controller->rconn);
}

bool
ofproto_get_in_band(const struct ofproto *ofproto)
{
    return ofproto->in_band != NULL;
}

bool
ofproto_get_discovery(const struct ofproto *ofproto)
{
    return ofproto->discovery != NULL;
}

const char *
ofproto_get_controller(const struct ofproto *ofproto)
{
    return rconn_get_name(ofproto->controller->rconn);
}

void
ofproto_get_listeners(const struct ofproto *ofproto, struct svec *listeners)
{
    size_t i;

    for (i = 0; i < ofproto->n_listeners; i++) {
        svec_add(listeners, pvconn_get_name(ofproto->listeners[i]));
    }
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

void
ofproto_destroy(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport;
    unsigned int port_no;
    size_t i;

    if (!p) {
        return;
    }

    /* Destroy fail-open early, because it touches the classifier. */
    ofproto_set_failure(p, false);

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_destroy(ofconn);
    }

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    in_band_destroy(p->in_band);
    discovery_destroy(p->discovery);
    pinsched_destroy(p->miss_sched);
    pinsched_destroy(p->action_sched);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    switch_status_unregister(p->ss_cat);

    for (i = 0; i < p->n_listeners; i++) {
        pvconn_close(p->listeners[i]);
    }
    free(p->listeners);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

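/* Performs one pass of periodic work for 'p': receives and dispatches a batch
 * of datapath messages, tracks port changes, runs the in-band, discovery,
 * fail-open, and packet-in scheduling submodules, services OpenFlow
 * connections and listeners, and expires flows roughly once per second. */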
int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    char *devname;
    int error;
    int i;

    if (shash_is_empty(&p->port_by_name)) {
        init_ports(p);
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;
        int error;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        in_band_run(p->in_band);
    }
    if (p->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(p->controller->rconn)) {
            discovery_question_connectivity(p->discovery);
        }
        if (discovery_run(p->discovery, &controller_name)) {
            if (controller_name) {
                rconn_connect(p->controller->rconn, controller_name);
            } else {
                rconn_disconnect(p->controller->rconn);
            }
        }
    }
    pinsched_run(p->miss_sched, send_packet_in_miss, p);
    pinsched_run(p->action_sched, send_packet_in_action, p);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_run(ofconn, p);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    for (i = 0; i < p->n_listeners; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn);
        if (!retval) {
            ofconn_create(p, rconn_new_from_vconn("passive", vconn));
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            rconn_add_monitor(p->controller->rconn, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        COVERAGE_INC(ofproto_expiration);
        p->next_expiration = time_msec() + 1000;
        update_used(p);

        classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);

        /* Let the hook know that we're at a stable point: all outstanding data
         * in existing flows has been accounted to the account_cb.  Thus, the
         * hook can now reasonably do operations that depend on having accurate
         * flow volume accounting (currently, that's just bond rebalancing). */
        if (p->ofhooks->account_checkpoint_cb) {
            p->ofhooks->account_checkpoint_cb(p->aux);
        }
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

struct revalidate_cbdata {
    struct ofproto *ofproto;
    bool revalidate_all;        /* Revalidate all exact-match rules? */
    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */
};

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    if (p->need_revalidate || revalidate_all
        || !tag_set_is_empty(&p->revalidate_set)) {
        struct revalidate_cbdata cbdata;
        cbdata.ofproto = p;
        cbdata.revalidate_all = revalidate_all;
        cbdata.revalidate_subrules = p->need_revalidate;
        cbdata.revalidate_set = p->revalidate_set;
        tag_set_init(&p->revalidate_set);
        COVERAGE_INC(ofproto_revalidate);
        classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        p->need_revalidate = false;
    }

    return 0;
}

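/* Arranges for the poll loop to wake up when any of the work done by
 * ofproto_run1() or ofproto_run2() needs to be performed for 'p'. */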
void
ofproto_wait(struct ofproto *p)
{
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        in_band_wait(p->in_band);
    }
    if (p->discovery) {
        discovery_wait(p->discovery);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    pinsched_wait(p->miss_sched);
    pinsched_wait(p->action_sched);
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait(p->next_expiration - time_msec());
    }
    for (i = 0; i < p->n_listeners; i++) {
        pvconn_wait(p->listeners[i]);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return p->discovery || rconn_is_alive(p->controller->rconn);
}

int
ofproto_send_packet(struct ofproto *p, const flow_t *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, flow->in_port, odp_actions.actions,
                 odp_actions.n_actions, packet);
    return 0;
}

void
ofproto_add_flow(struct ofproto *p,
                 const flow_t *flow, uint32_t wildcards, unsigned int priority,
                 const union ofp_action *actions, size_t n_actions,
                 int idle_timeout)
{
    struct rule *rule;
    rule = rule_create(p, NULL, actions, n_actions,
                       idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
                       0, false);
    cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
    rule_insert(p, rule, NULL, 0);
}

void
ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
                    uint32_t wildcards, unsigned int priority)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           flow, wildcards,
                                                           priority));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    /* Mark the flow as not installed, even though it might really be
     * installed, so that rule_remove() doesn't bother trying to uninstall it.
     * There is no point in uninstalling it individually since we are about to
     * blow away all the flows with dpif_flow_flush(). */
    rule->installed = false;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}

static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    unsigned int port_no;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    svec_init(&devnames);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        svec_add (&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add (&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

static size_t
refresh_port_group(struct ofproto *p, unsigned int group)
{
    uint16_t *ports;
    size_t n_ports;
    struct ofport *port;
    unsigned int port_no;

    assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);

    ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
    n_ports = 0;
    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
            ports[n_ports++] = port_no;
        }
    }
    dpif_port_group_set(p->dpif, group, ports, n_ports);
    free(ports);

    return n_ports;
}

static void
refresh_port_groups(struct ofproto *p)
{
    size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
    size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
    if (p->sflow) {
        ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
    }
}

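/* Opens the network device for 'odp_port' and returns a newly allocated
 * ofport describing it, or a null pointer if the device cannot be opened. */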
static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    bool carrier;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = odp_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
    netdev_options.may_open = true;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (port_array_get(&p->ports, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static int
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
    if (p->ofhooks->port_changed_cb) {
        p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, ofport);
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);

    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, NULL);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

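/* Refreshes 'p''s idea of the port named 'devname', adding, removing, or
 * modifying the corresponding ofport as needed and notifying the controller
 * and the port-change hook of the change. */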
static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_ofport = port_array_get(&p->ports, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);

    /* Update port groups. */
    refresh_port_groups(p);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    refresh_port_groups(p);
    return 0;
}

static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn)
{
    struct ofconn *ofconn = xmalloc(sizeof *ofconn);
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->pktbuf = NULL;
    ofconn->miss_send_len = 0;
    ofconn->packet_in_counter = rconn_packet_counter_create ();
    ofconn->reply_counter = rconn_packet_counter_create ();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn)
{
    list_remove(&ofconn->node);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn, struct ofproto *p)
{
    int iteration;

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, p, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (ofconn != p->controller && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}

/* Caller is responsible for initializing the 'cr' member of the returned
 * rule. */
static struct rule *
rule_create(struct ofproto *ofproto, struct rule *super,
            const union ofp_action *actions, size_t n_actions,
            uint16_t idle_timeout, uint16_t hard_timeout,
            bool send_flow_removed)
{
    struct rule *rule = xzalloc(sizeof *rule);
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    rule->used = rule->created = time_msec();
    rule->send_flow_removed = send_flow_removed;
    rule->super = super;
    if (super) {
        list_push_back(&super->list, &rule->list);
    } else {
        list_init(&rule->list);
    }
    rule->n_actions = n_actions;
    rule->actions = xmemdup(actions, n_actions * sizeof *actions);
    netflow_flow_clear(&rule->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);

    return rule;
}

static struct rule *
rule_from_cls_rule(const struct cls_rule *cls_rule)
{
    return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}

static void
rule_free(struct rule *rule)
{
    free(rule->actions);
    free(rule->odp_actions);
    free(rule);
}

/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer have a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from the
 * classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
static void
rule_destroy(struct ofproto *ofproto, struct rule *rule)
{
    if (!rule->super) {
        struct rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
            revalidate_rule(ofproto, subrule);
        }
    } else {
        list_remove(&rule->list);
    }
    rule_free(rule);
}

static bool
rule_has_out_port(const struct rule *rule, uint16_t out_port)
{
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {
        return true;
    }
    for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
         oa = actions_next(&i)) {
        if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
            return true;
        }
    }
    return false;
}

/* Executes the actions indicated by 'rule' on 'packet', which is in flow
 * 'flow' and is considered to have arrived on ODP port 'in_port'.
 *
 * The flow that 'packet' actually contains does not need to actually match
 * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
 * the packet and byte counters for 'rule' will be credited for the packet sent
 * out whether or not the packet actually matches 'rule'.
 *
 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
 * the caller must already have accurately composed ODP actions for it given
 * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
 * function will compose a set of ODP actions based on 'rule''s OpenFlow
 * actions and apply them to 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule,
             struct ofpbuf *packet, const flow_t *flow)
{
    const union odp_action *actions;
    size_t n_actions;
    struct odp_actions a;

    /* Grab or compose the ODP actions.
     *
     * The special case for an exact-match 'rule' where 'flow' is not the
     * rule's flow is important to avoid, e.g., sending a packet out its input
     * port simply because the ODP actions were composed for the wrong
     * scenario. */
    if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
        struct rule *super = rule->super ? rule->super : rule;
        if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
                          packet, &a, NULL, 0, NULL)) {
            return;
        }
        actions = a.actions;
        n_actions = a.n_actions;
    } else {
        actions = rule->odp_actions;
        n_actions = rule->n_odp_actions;
    }

    /* Execute the ODP actions. */
    if (!dpif_execute(ofproto->dpif, flow->in_port,
                      actions, n_actions, packet)) {
        struct odp_flow_stats stats;
        flow_extract_stats(flow, packet, &stats);
        update_stats(ofproto, rule, &stats);
        rule->used = time_msec();
        netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
    }
}

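/* Inserts 'rule' into 'p''s flow table.  If 'packet' is nonnull, it is first
 * run through the rule (crediting the rule's counters) and only then is the
 * rule installed in the datapath, so that packets are not reordered. */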
static void
rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
            uint16_t in_port)
{
    struct rule *displaced_rule;

    /* Insert the rule in the classifier. */
    displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
    if (!rule->cr.wc.wildcards) {
        rule_make_actions(p, rule, packet);
    }

    /* Send the packet and credit it to the rule. */
    if (packet) {
        flow_t flow;
        flow_extract(packet, in_port, &flow);
        rule_execute(p, rule, packet, &flow);
    }

    /* Install the rule in the datapath only after sending the packet, to
     * avoid packet reordering. */
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_add_wc_flow);
        p->need_revalidate = true;
    } else {
        rule_install(p, rule, displaced_rule);
    }

    /* Free the rule that was displaced, if any. */
    if (displaced_rule) {
        rule_destroy(p, displaced_rule);
    }
}

static struct rule *
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
                    const flow_t *flow)
{
    struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
                                       rule->idle_timeout, rule->hard_timeout,
                                       false);
    COVERAGE_INC(ofproto_subrule_create);
    cls_rule_from_flow(&subrule->cr, flow, 0,
                       (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
                        : rule->cr.priority));
    classifier_insert_exact(&ofproto->cls, &subrule->cr);

    return subrule;
}

static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_del_wc_flow);
        ofproto->need_revalidate = true;
    } else {
        rule_uninstall(ofproto, rule);
    }
    classifier_remove(&ofproto->cls, &rule->cr);
    rule_destroy(ofproto, rule);
}

/* Returns true if the actions changed, false otherwise. */
static bool
rule_make_actions(struct ofproto *p, struct rule *rule,
                  const struct ofpbuf *packet)
{
    const struct rule *super;
    struct odp_actions a;
    size_t actions_len;

    assert(!rule->cr.wc.wildcards);

    super = rule->super ? rule->super : rule;
    rule->tags = 0;
    xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
                  packet, &a, &rule->tags, &rule->may_install,
                  &rule->nf_flow.output_iface);

    actions_len = a.n_actions * sizeof *a.actions;
    if (rule->n_odp_actions != a.n_actions
        || memcmp(rule->odp_actions, a.actions, actions_len)) {
        COVERAGE_INC(ofproto_odp_unchanged);
        free(rule->odp_actions);
        rule->n_odp_actions = a.n_actions;
        rule->odp_actions = xmemdup(a.actions, actions_len);
        return true;
    } else {
        return false;
    }
}

static int
do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
            struct odp_flow_put *put)
{
    memset(&put->flow.stats, 0, sizeof put->flow.stats);
    put->flow.key = rule->cr.flow;
    put->flow.actions = rule->odp_actions;
    put->flow.n_actions = rule->n_odp_actions;
    put->flags = flags;
    return dpif_flow_put(ofproto->dpif, put);
}

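/* Installs exact-match 'rule' in the datapath, if its actions permit
 * installation.  If 'displaced_rule' is nonnull, its final datapath
 * statistics are collected and its post-uninstall bookkeeping is done;
 * otherwise a displaced rule that cannot be replaced is uninstalled. */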
static void
rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
{
    assert(!rule->cr.wc.wildcards);

    if (rule->may_install) {
        struct odp_flow_put put;
        if (!do_put_flow(p, rule,
                         ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
                         &put)) {
            rule->installed = true;
            if (displaced_rule) {
                update_stats(p, displaced_rule, &put.flow.stats);
                rule_post_uninstall(p, displaced_rule);
            }
        }
    } else if (displaced_rule) {
        rule_uninstall(p, displaced_rule);
    }
}

static void
rule_reinstall(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->installed) {
        struct odp_flow_put put;
        COVERAGE_INC(ofproto_dp_missed);
        do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
    } else {
        rule_install(ofproto, rule, NULL);
    }
}

static void
rule_update_actions(struct ofproto *ofproto, struct rule *rule)
{
    bool actions_changed;
    uint16_t new_out_iface, old_out_iface;

    old_out_iface = rule->nf_flow.output_iface;
    actions_changed = rule_make_actions(ofproto, rule, NULL);

    if (rule->may_install) {
        if (rule->installed) {
            if (actions_changed) {
                struct odp_flow_put put;
                do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
                            | ODPPF_ZERO_STATS, &put);
                update_stats(ofproto, rule, &put.flow.stats);

                /* Temporarily set the old output iface so that NetFlow
                 * messages have the correct output interface for the old
                 * stats. */
                new_out_iface = rule->nf_flow.output_iface;
                rule->nf_flow.output_iface = old_out_iface;
                rule_post_uninstall(ofproto, rule);
                rule->nf_flow.output_iface = new_out_iface;
            }
        } else {
            rule_install(ofproto, rule, NULL);
        }
    } else {
        rule_uninstall(ofproto, rule);
    }
}

static void
rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
{
    uint64_t total_bytes = rule->byte_count + extra_bytes;

    if (ofproto->ofhooks->account_flow_cb
        && total_bytes > rule->accounted_bytes)
    {
        ofproto->ofhooks->account_flow_cb(
            &rule->cr.flow, rule->odp_actions, rule->n_odp_actions,
            total_bytes - rule->accounted_bytes, ofproto->aux);
        rule->accounted_bytes = total_bytes;
    }
}

static void
rule_uninstall(struct ofproto *p, struct rule *rule)
{
    assert(!rule->cr.wc.wildcards);
    if (rule->installed) {
        struct odp_flow odp_flow;

        odp_flow.key = rule->cr.flow;
        odp_flow.actions = NULL;
        odp_flow.n_actions = 0;
        if (!dpif_flow_del(p->dpif, &odp_flow)) {
            update_stats(p, rule, &odp_flow.stats);
        }
        rule->installed = false;

        rule_post_uninstall(p, rule);
    }
}

static bool
is_controller_rule(struct rule *rule)
{
    /* If the only action is send to the controller then don't report
     * NetFlow expiration messages since it is just part of the control
     * logic for the network and not real traffic. */

    if (rule && rule->super) {
        struct rule *super = rule->super;

        return super->n_actions == 1 &&
            super->actions[0].type == htons(OFPAT_OUTPUT) &&
            super->actions[0].output.port == htons(OFPP_CONTROLLER);
    }

    return false;
}

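/* Bookkeeping that follows removal of 'rule' from the datapath: credits the
 * flow to the accounting hook and to NetFlow, and folds the rule's packet and
 * byte counters into its super-rule, if any. */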
static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
    struct rule *super = rule->super;

    rule_account(ofproto, rule, 0);

    if (ofproto->netflow && !is_controller_rule(rule)) {
        struct ofexpired expired;
        expired.flow = rule->cr.flow;
        expired.packet_count = rule->packet_count;
        expired.byte_count = rule->byte_count;
        expired.used = rule->used;
        netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
    }
    if (super) {
        super->packet_count += rule->packet_count;
        super->byte_count += rule->byte_count;

        /* Reset counters to prevent double counting if the rule ever gets
         * reinstalled. */
        rule->packet_count = 0;
        rule->byte_count = 0;
        rule->accounted_bytes = 0;

        netflow_flow_clear(&rule->nf_flow);
    }
}

static void
queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
         struct rconn_packet_counter *counter)
{
    update_openflow_length(msg);
    if (rconn_send(ofconn->rconn, msg, counter)) {
        ofpbuf_delete(msg);
    }
}

static void
send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
           int error, const void *data, size_t len)
{
    struct ofpbuf *buf;
    struct ofp_error_msg *oem;

    if (!(error >> 16)) {
        VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
                     error);
        return;
    }

    COVERAGE_INC(ofproto_error);
    oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
                            oh ? oh->xid : 0, &buf);
    oem->type = htons((unsigned int) error >> 16);
    oem->code = htons(error & 0xffff);
    memcpy(oem->data, data, len);
    queue_tx(buf, ofconn, ofconn->reply_counter);
}

static void
send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
              int error)
{
    size_t oh_length = ntohs(oh->length);
    send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
}

static void
hton_ofp_phy_port(struct ofp_phy_port *opp)
{
    opp->port_no = htons(opp->port_no);
    opp->config = htonl(opp->config);
    opp->state = htonl(opp->state);
    opp->curr = htonl(opp->curr);
    opp->advertised = htonl(opp->advertised);
    opp->supported = htonl(opp->supported);
    opp->peer = htonl(opp->peer);
}

static int
handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
{
    struct ofp_header *rq = oh;
    queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
    return 0;
}

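/* Handles an OFPT_FEATURES_REQUEST from 'ofconn' by replying with the
 * datapath ID, table and buffer counts, supported capabilities and actions,
 * and the current port descriptions. */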
1839static int
1840handle_features_request(struct ofproto *p, struct ofconn *ofconn,
1841 struct ofp_header *oh)
1842{
1843 struct ofp_switch_features *osf;
1844 struct ofpbuf *buf;
1845 unsigned int port_no;
1846 struct ofport *port;
1847
1848 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
1849 osf->datapath_id = htonll(p->datapath_id);
1850 osf->n_buffers = htonl(pktbuf_capacity());
1851 osf->n_tables = 2;
1852 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
1853 OFPC_PORT_STATS | OFPC_MULTI_PHY_TX);
1854 osf->actions = htonl((1u << OFPAT_OUTPUT) |
1855 (1u << OFPAT_SET_VLAN_VID) |
1856 (1u << OFPAT_SET_VLAN_PCP) |
1857 (1u << OFPAT_STRIP_VLAN) |
1858 (1u << OFPAT_SET_DL_SRC) |
1859 (1u << OFPAT_SET_DL_DST) |
1860 (1u << OFPAT_SET_NW_SRC) |
1861 (1u << OFPAT_SET_NW_DST) |
959a2ecd 1862 (1u << OFPAT_SET_NW_TOS) |
064af421
BP
1863 (1u << OFPAT_SET_TP_SRC) |
1864 (1u << OFPAT_SET_TP_DST));
1865
1866 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
1867 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
1868 }
1869
1870 queue_tx(buf, ofconn, ofconn->reply_counter);
1871 return 0;
1872}
1873
1874static int
1875handle_get_config_request(struct ofproto *p, struct ofconn *ofconn,
1876 struct ofp_header *oh)
1877{
1878 struct ofpbuf *buf;
1879 struct ofp_switch_config *osc;
1880 uint16_t flags;
1881 bool drop_frags;
1882
1883 /* Figure out flags. */
c228a364 1884 dpif_get_drop_frags(p->dpif, &drop_frags);
064af421 1885 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
064af421
BP
1886
1887 /* Send reply. */
1888 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
1889 osc->flags = htons(flags);
1890 osc->miss_send_len = htons(ofconn->miss_send_len);
1891 queue_tx(buf, ofconn, ofconn->reply_counter);
1892
1893 return 0;
1894}
1895
1896static int
1897handle_set_config(struct ofproto *p, struct ofconn *ofconn,
1898 struct ofp_switch_config *osc)
1899{
1900 uint16_t flags;
1901 int error;
1902
1903 error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
1904 if (error) {
1905 return error;
1906 }
1907 flags = ntohs(osc->flags);
1908
064af421
BP
1909 if (ofconn == p->controller) {
1910 switch (flags & OFPC_FRAG_MASK) {
1911 case OFPC_FRAG_NORMAL:
c228a364 1912 dpif_set_drop_frags(p->dpif, false);
064af421
BP
1913 break;
1914 case OFPC_FRAG_DROP:
c228a364 1915 dpif_set_drop_frags(p->dpif, true);
064af421
BP
1916 break;
1917 default:
1918 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
1919 osc->flags);
1920 break;
1921 }
1922 }
1923
1924 if ((ntohs(osc->miss_send_len) != 0) != (ofconn->miss_send_len != 0)) {
1925 if (ntohs(osc->miss_send_len) != 0) {
1926 ofconn->pktbuf = pktbuf_create();
1927 } else {
1928 pktbuf_destroy(ofconn->pktbuf);
1929 }
1930 }
1931
1932 ofconn->miss_send_len = ntohs(osc->miss_send_len);
1933
1934 return 0;
1935}
1936
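/* Appends an ODPAT_OUTPUT_GROUP action for 'group' to 'actions'.  If the
 * group floods (DP_GROUP_ALL or DP_GROUP_FLOOD), records NF_OUT_FLOOD in
 * '*nf_output_iface' for NetFlow reporting. */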
1937static void
6a07af36
JG
1938add_output_group_action(struct odp_actions *actions, uint16_t group,
1939 uint16_t *nf_output_iface)
064af421
BP
1940{
1941 odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
6a07af36
JG
1942
1943 if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
1944 *nf_output_iface = NF_OUT_FLOOD;
1945 }
064af421
BP
1946}
1947
1948static void
1949add_controller_action(struct odp_actions *actions,
1950 const struct ofp_action_output *oao)
1951{
1952 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
1953 a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
1954}
1955
1956struct action_xlate_ctx {
1957 /* Input. */
1958 const flow_t *flow; /* Flow to which these actions correspond. */
1959 int recurse; /* Recursion level, via xlate_table_action. */
1960 struct ofproto *ofproto;
1961 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
1962 * null pointer if we are revalidating
1963 * without a packet to refer to. */
1964
1965 /* Output. */
1966 struct odp_actions *out; /* Datapath actions. */
1967 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
d6fbec6d 1968 bool may_set_up_flow; /* True ordinarily; false if the actions must
064af421 1969 * be reassessed for every packet. */
6a07af36 1970 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
064af421
BP
1971};
1972
1973static void do_xlate_actions(const union ofp_action *in, size_t n_in,
1974 struct action_xlate_ctx *ctx);
1975
1976static void
1977add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
1978{
1979 const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
6cfaf517
BP
1980
1981 if (ofport) {
1982 if (ofport->opp.config & OFPPC_NO_FWD) {
1983 /* Forwarding disabled on port. */
1984 return;
1985 }
1986 } else {
1987 /*
1988 * We don't have an ofport record for this port, but it doesn't hurt to
1989 * allow forwarding to it anyhow. Maybe such a port will appear later
1990 * and we're pre-populating the flow table.
1991 */
064af421 1992 }
6cfaf517
BP
1993
1994 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
6a07af36 1995 ctx->nf_output_iface = port;
064af421
BP
1996}
1997
1998static struct rule *
1999lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
2000{
2001 struct rule *rule;
2002 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
2003
2004 /* The rule we found might not be valid, since we could be in need of
2005 * revalidation. If it is not valid, don't return it. */
2006 if (rule
2007 && rule->super
2008 && ofproto->need_revalidate
2009 && !revalidate_rule(ofproto, rule)) {
2010 COVERAGE_INC(ofproto_invalidated);
2011 return NULL;
2012 }
2013
2014 return rule;
2015}
2016
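/* Looks up 'ctx''s flow, with its input port overridden by 'in_port', in the
 * OpenFlow flow table and translates the actions of the rule found (or of its
 * super-rule, for an exact-match subrule).  Recursion is capped at one level
 * so that a resubmit loop cannot translate actions forever. */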
2017static void
2018xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2019{
2020 if (!ctx->recurse) {
2021 struct rule *rule;
2022 flow_t flow;
2023
2024 flow = *ctx->flow;
2025 flow.in_port = in_port;
2026
2027 rule = lookup_valid_rule(ctx->ofproto, &flow);
2028 if (rule) {
2029 if (rule->super) {
2030 rule = rule->super;
2031 }
2032
2033 ctx->recurse++;
2034 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2035 ctx->recurse--;
2036 }
2037 }
2038}
2039
2040static void
2041xlate_output_action(struct action_xlate_ctx *ctx,
2042 const struct ofp_action_output *oao)
2043{
2044 uint16_t odp_port;
6a07af36
JG
2045 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2046
2047 ctx->nf_output_iface = NF_OUT_DROP;
064af421
BP
2048
2049 switch (ntohs(oao->port)) {
2050 case OFPP_IN_PORT:
2051 add_output_action(ctx, ctx->flow->in_port);
2052 break;
2053 case OFPP_TABLE:
2054 xlate_table_action(ctx, ctx->flow->in_port);
2055 break;
2056 case OFPP_NORMAL:
2057 if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet,
2058 ctx->out, ctx->tags,
6a07af36 2059 &ctx->nf_output_iface,
064af421
BP
2060 ctx->ofproto->aux)) {
2061 COVERAGE_INC(ofproto_uninstallable);
d6fbec6d 2062 ctx->may_set_up_flow = false;
064af421
BP
2063 }
2064 break;
2065 case OFPP_FLOOD:
6a07af36
JG
2066 add_output_group_action(ctx->out, DP_GROUP_FLOOD,
2067 &ctx->nf_output_iface);
064af421
BP
2068 break;
2069 case OFPP_ALL:
6a07af36 2070 add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
064af421
BP
2071 break;
2072 case OFPP_CONTROLLER:
2073 add_controller_action(ctx->out, oao);
2074 break;
2075 case OFPP_LOCAL:
2076 add_output_action(ctx, ODPP_LOCAL);
2077 break;
2078 default:
2079 odp_port = ofp_port_to_odp_port(ntohs(oao->port));
2080 if (odp_port != ctx->flow->in_port) {
2081 add_output_action(ctx, odp_port);
2082 }
2083 break;
2084 }
6a07af36
JG
2085
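    /* Update the NetFlow output interface for this flow: a flood always
     * wins, an action that produced no output keeps the previous value, and
     * two distinct concrete outputs collapse into NF_OUT_MULTI. */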
2086 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2087 ctx->nf_output_iface = NF_OUT_FLOOD;
2088 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2089 ctx->nf_output_iface = prev_nf_output_iface;
2090 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2091 ctx->nf_output_iface != NF_OUT_FLOOD) {
2092 ctx->nf_output_iface = NF_OUT_MULTI;
2093 }
064af421
BP
2094}
2095
2096static void
2097xlate_nicira_action(struct action_xlate_ctx *ctx,
2098 const struct nx_action_header *nah)
2099{
2100 const struct nx_action_resubmit *nar;
2101 int subtype = ntohs(nah->subtype);
2102
2103 assert(nah->vendor == htonl(NX_VENDOR_ID));
2104 switch (subtype) {
2105 case NXAST_RESUBMIT:
2106 nar = (const struct nx_action_resubmit *) nah;
2107 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2108 break;
2109
2110 default:
2111 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2112 break;
2113 }
2114}
2115
2116static void
2117do_xlate_actions(const union ofp_action *in, size_t n_in,
2118 struct action_xlate_ctx *ctx)
2119{
2120 struct actions_iterator iter;
2121 const union ofp_action *ia;
2122 const struct ofport *port;
2123
2124 port = port_array_get(&ctx->ofproto->ports, ctx->flow->in_port);
2125 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2126 port->opp.config & (eth_addr_equals(ctx->flow->dl_dst, stp_eth_addr)
2127 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2128 /* Drop this flow. */
2129 return;
2130 }
2131
2132 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2133 uint16_t type = ntohs(ia->type);
2134 union odp_action *oa;
2135
2136 switch (type) {
2137 case OFPAT_OUTPUT:
2138 xlate_output_action(ctx, &ia->output);
2139 break;
2140
2141 case OFPAT_SET_VLAN_VID:
2142 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID);
2143 oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid;
2144 break;
2145
2146 case OFPAT_SET_VLAN_PCP:
2147 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP);
2148 oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp;
2149 break;
2150
2151 case OFPAT_STRIP_VLAN:
2152 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2153 break;
2154
2155 case OFPAT_SET_DL_SRC:
2156 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2157 memcpy(oa->dl_addr.dl_addr,
2158 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2159 break;
2160
2161 case OFPAT_SET_DL_DST:
2162 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2163 memcpy(oa->dl_addr.dl_addr,
2164 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2165 break;
2166
2167 case OFPAT_SET_NW_SRC:
2168 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2169 oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2170 break;
2171
2d70a31a
JP
2172 case OFPAT_SET_NW_DST:
2173 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
2174            oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
                break;
959a2ecd
JP
2175
2176 case OFPAT_SET_NW_TOS:
2177 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
2178 oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2d70a31a
JP
2179 break;
2180
064af421
BP
2181 case OFPAT_SET_TP_SRC:
2182 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2183 oa->tp_port.tp_port = ia->tp_port.tp_port;
2184 break;
2185
2d70a31a
JP
2186 case OFPAT_SET_TP_DST:
2187 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2188 oa->tp_port.tp_port = ia->tp_port.tp_port;
2189 break;
2190
064af421
BP
2191 case OFPAT_VENDOR:
2192 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2193 break;
2194
2195 default:
2196 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2197 break;
2198 }
2199 }
2200}
2201
2202static int
2203xlate_actions(const union ofp_action *in, size_t n_in,
2204 const flow_t *flow, struct ofproto *ofproto,
2205 const struct ofpbuf *packet,
6a07af36
JG
2206 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2207 uint16_t *nf_output_iface)
064af421
BP
2208{
2209 tag_type no_tags = 0;
2210 struct action_xlate_ctx ctx;
2211 COVERAGE_INC(ofproto_ofp2odp);
2212 odp_actions_init(out);
2213 ctx.flow = flow;
2214 ctx.recurse = 0;
2215 ctx.ofproto = ofproto;
2216 ctx.packet = packet;
2217 ctx.out = out;
2218 ctx.tags = tags ? tags : &no_tags;
d6fbec6d 2219 ctx.may_set_up_flow = true;
6a07af36 2220 ctx.nf_output_iface = NF_OUT_DROP;
064af421 2221 do_xlate_actions(in, n_in, &ctx);
0ad9b732 2222
d6fbec6d 2223 /* Check with in-band control to see if we're allowed to set up this
0ad9b732
JP
2224 * flow. */
2225 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
d6fbec6d 2226 ctx.may_set_up_flow = false;
0ad9b732
JP
2227 }
2228
d6fbec6d
BP
2229 if (may_set_up_flow) {
2230 *may_set_up_flow = ctx.may_set_up_flow;
064af421 2231 }
6a07af36
JG
2232 if (nf_output_iface) {
2233 *nf_output_iface = ctx.nf_output_iface;
064af421
BP
2234 }
2235 if (odp_actions_overflow(out)) {
2236 odp_actions_init(out);
2237 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2238 }
2239 return 0;
2240}
2241
2242static int
2243handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
2244 struct ofp_header *oh)
2245{
2246 struct ofp_packet_out *opo;
2247 struct ofpbuf payload, *buffer;
2248 struct odp_actions actions;
2249 int n_actions;
2250 uint16_t in_port;
2251 flow_t flow;
2252 int error;
2253
2254 error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports);
2255 if (error) {
2256 return error;
2257 }
2258 opo = (struct ofp_packet_out *) oh;
2259
2260 COVERAGE_INC(ofproto_packet_out);
2261 if (opo->buffer_id != htonl(UINT32_MAX)) {
2262 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2263 &buffer, &in_port);
7778bd15 2264 if (error || !buffer) {
064af421
BP
2265 return error;
2266 }
2267 payload = *buffer;
2268 } else {
2269 buffer = NULL;
2270 }
2271
2272 flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
2273 error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
6a07af36 2274 &flow, p, &payload, &actions, NULL, NULL, NULL);
064af421
BP
2275 if (error) {
2276 return error;
2277 }
2278
c228a364 2279 dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions,
064af421
BP
2280 &payload);
2281 ofpbuf_delete(buffer);
2282
2283 return 0;
2284}
2285
2286static void
2287update_port_config(struct ofproto *p, struct ofport *port,
2288 uint32_t config, uint32_t mask)
2289{
2290 mask &= config ^ port->opp.config;
2291 if (mask & OFPPC_PORT_DOWN) {
2292 if (config & OFPPC_PORT_DOWN) {
2293 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
2294 } else {
2295 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
2296 }
2297 }
2298#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
2299 if (mask & REVALIDATE_BITS) {
2300 COVERAGE_INC(ofproto_costly_flags);
2301 port->opp.config ^= mask & REVALIDATE_BITS;
2302 p->need_revalidate = true;
2303 }
2304#undef REVALIDATE_BITS
2305 if (mask & OFPPC_NO_FLOOD) {
2306 port->opp.config ^= OFPPC_NO_FLOOD;
72b06300 2307 refresh_port_groups(p);
064af421
BP
2308 }
2309 if (mask & OFPPC_NO_PACKET_IN) {
2310 port->opp.config ^= OFPPC_NO_PACKET_IN;
2311 }
2312}
2313
2314static int
2315handle_port_mod(struct ofproto *p, struct ofp_header *oh)
2316{
2317 const struct ofp_port_mod *opm;
2318 struct ofport *port;
2319 int error;
2320
2321 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
2322 if (error) {
2323 return error;
2324 }
2325 opm = (struct ofp_port_mod *) oh;
2326
2327 port = port_array_get(&p->ports,
2328 ofp_port_to_odp_port(ntohs(opm->port_no)));
2329 if (!port) {
2330 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
2331 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
2332 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
2333 } else {
2334 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
2335 if (opm->advertise) {
2336 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
2337 }
2338 }
2339 return 0;
2340}
2341
2342static struct ofpbuf *
2343make_stats_reply(uint32_t xid, uint16_t type, size_t body_len)
2344{
2345 struct ofp_stats_reply *osr;
2346 struct ofpbuf *msg;
2347
2348 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
2349 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
2350 osr->type = type;
2351 osr->flags = htons(0);
2352 return msg;
2353}
2354
2355static struct ofpbuf *
2356start_stats_reply(const struct ofp_stats_request *request, size_t body_len)
2357{
2358 return make_stats_reply(request->header.xid, request->type, body_len);
2359}
2360
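/* Reserves 'nbytes' of space for another entry in the stats reply in '*msgp'.
 * If adding the entry would push the message past the 16-bit OpenFlow length
 * limit, the current reply is sent with OFPSF_REPLY_MORE set and a fresh
 * reply with the same xid and type is started in its place. */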
2361static void *
2362append_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofpbuf **msgp)
2363{
2364 struct ofpbuf *msg = *msgp;
2365 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
2366 if (nbytes + msg->size > UINT16_MAX) {
2367 struct ofp_stats_reply *reply = msg->data;
2368 reply->flags = htons(OFPSF_REPLY_MORE);
2369 *msgp = make_stats_reply(reply->header.xid, reply->type, nbytes);
2370 queue_tx(msg, ofconn, ofconn->reply_counter);
2371 }
2372 return ofpbuf_put_uninit(*msgp, nbytes);
2373}
2374
2375static int
2376handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn,
2377 struct ofp_stats_request *request)
2378{
2379 struct ofp_desc_stats *ods;
2380 struct ofpbuf *msg;
2381
2382 msg = start_stats_reply(request, sizeof *ods);
2383 ods = append_stats_reply(sizeof *ods, ofconn, &msg);
2384 strncpy(ods->mfr_desc, p->manufacturer, sizeof ods->mfr_desc);
2385 strncpy(ods->hw_desc, p->hardware, sizeof ods->hw_desc);
2386 strncpy(ods->sw_desc, p->software, sizeof ods->sw_desc);
2387 strncpy(ods->serial_num, p->serial, sizeof ods->serial_num);
2388 queue_tx(msg, ofconn, ofconn->reply_counter);
2389
2390 return 0;
2391}
2392
2393static void
2394count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
2395{
2396 struct rule *rule = rule_from_cls_rule(cls_rule);
2397 int *n_subrules = n_subrules_;
2398
2399 if (rule->super) {
2400 (*n_subrules)++;
2401 }
2402}
2403
2404static int
2405handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
2406 struct ofp_stats_request *request)
2407{
2408 struct ofp_table_stats *ots;
2409 struct ofpbuf *msg;
2410 struct odp_stats dpstats;
2411 int n_exact, n_subrules, n_wild;
2412
2413 msg = start_stats_reply(request, sizeof *ots * 2);
2414
2415 /* Count rules of various kinds. */
2416 n_subrules = 0;
2417 classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
2418 n_exact = classifier_count_exact(&p->cls) - n_subrules;
2419 n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
2420
2421 /* Hash table. */
c228a364 2422 dpif_get_dp_stats(p->dpif, &dpstats);
064af421
BP
2423 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2424 memset(ots, 0, sizeof *ots);
2425 ots->table_id = TABLEID_HASH;
2426 strcpy(ots->name, "hash");
2427 ots->wildcards = htonl(0);
2428 ots->max_entries = htonl(dpstats.max_capacity);
2429 ots->active_count = htonl(n_exact);
2430 ots->lookup_count = htonll(dpstats.n_frags + dpstats.n_hit +
2431 dpstats.n_missed);
2432 ots->matched_count = htonll(dpstats.n_hit); /* XXX */
2433
2434 /* Classifier table. */
2435 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2436 memset(ots, 0, sizeof *ots);
2437 ots->table_id = TABLEID_CLASSIFIER;
2438 strcpy(ots->name, "classifier");
2439 ots->wildcards = htonl(OFPFW_ALL);
2440 ots->max_entries = htonl(65536);
2441 ots->active_count = htonl(n_wild);
2442 ots->lookup_count = htonll(0); /* XXX */
2443 ots->matched_count = htonll(0); /* XXX */
2444
2445 queue_tx(msg, ofconn, ofconn->reply_counter);
2446 return 0;
2447}
2448
2449static int
2450handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn,
2451 struct ofp_stats_request *request)
2452{
2453 struct ofp_port_stats *ops;
2454 struct ofpbuf *msg;
2455 struct ofport *port;
2456 unsigned int port_no;
2457
2458 msg = start_stats_reply(request, sizeof *ops * 16);
2459 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
2460 struct netdev_stats stats;
2461
2462 /* Intentionally ignore return value, since errors will set 'stats' to
2463 * all-1s, which is correct for OpenFlow, and netdev_get_stats() will
2464 * log errors. */
2465 netdev_get_stats(port->netdev, &stats);
2466
2467 ops = append_stats_reply(sizeof *ops, ofconn, &msg);
2468 ops->port_no = htons(odp_port_to_ofp_port(port_no));
2469 memset(ops->pad, 0, sizeof ops->pad);
2470 ops->rx_packets = htonll(stats.rx_packets);
2471 ops->tx_packets = htonll(stats.tx_packets);
2472 ops->rx_bytes = htonll(stats.rx_bytes);
2473 ops->tx_bytes = htonll(stats.tx_bytes);
2474 ops->rx_dropped = htonll(stats.rx_dropped);
2475 ops->tx_dropped = htonll(stats.tx_dropped);
2476 ops->rx_errors = htonll(stats.rx_errors);
2477 ops->tx_errors = htonll(stats.tx_errors);
2478 ops->rx_frame_err = htonll(stats.rx_frame_errors);
2479 ops->rx_over_err = htonll(stats.rx_over_errors);
2480 ops->rx_crc_err = htonll(stats.rx_crc_errors);
2481 ops->collisions = htonll(stats.collisions);
2482 }
2483
2484 queue_tx(msg, ofconn, ofconn->reply_counter);
2485 return 0;
2486}
2487
2488struct flow_stats_cbdata {
2489 struct ofproto *ofproto;
2490 struct ofconn *ofconn;
2491 uint16_t out_port;
2492 struct ofpbuf *msg;
2493};
2494
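/* Obtains packet and byte counts for 'rule' as '*packet_countp' and
 * '*byte_countp'.  The totals combine the counters already folded into the
 * rule (and, for a wildcarded rule, its subrules) with any statistics the
 * datapath reports for the corresponding installed flows. */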
2495static void
2496query_stats(struct ofproto *p, struct rule *rule,
2497 uint64_t *packet_countp, uint64_t *byte_countp)
2498{
2499 uint64_t packet_count, byte_count;
2500 struct rule *subrule;
2501 struct odp_flow *odp_flows;
2502 size_t n_odp_flows;
2503
b3137fe8
JG
2504 packet_count = rule->packet_count;
2505 byte_count = rule->byte_count;
2506
064af421 2507 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
ec6fde61 2508 odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
064af421
BP
2509 if (rule->cr.wc.wildcards) {
2510 size_t i = 0;
2511 LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
2512 odp_flows[i++].key = subrule->cr.flow;
b3137fe8
JG
2513 packet_count += subrule->packet_count;
2514 byte_count += subrule->byte_count;
064af421
BP
2515 }
2516 } else {
2517 odp_flows[0].key = rule->cr.flow;
2518 }
2519
c228a364 2522 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
064af421
BP
2523 size_t i;
2524 for (i = 0; i < n_odp_flows; i++) {
2525 struct odp_flow *odp_flow = &odp_flows[i];
2526 packet_count += odp_flow->stats.n_packets;
2527 byte_count += odp_flow->stats.n_bytes;
2528 }
2529 }
2530 free(odp_flows);
2531
2532 *packet_countp = packet_count;
2533 *byte_countp = byte_count;
2534}
2535
2536static void
2537flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
2538{
2539 struct rule *rule = rule_from_cls_rule(rule_);
2540 struct flow_stats_cbdata *cbdata = cbdata_;
2541 struct ofp_flow_stats *ofs;
2542 uint64_t packet_count, byte_count;
2543 size_t act_len, len;
2544
2545 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2546 return;
2547 }
2548
2549 act_len = sizeof *rule->actions * rule->n_actions;
2550 len = offsetof(struct ofp_flow_stats, actions) + act_len;
2551
2552 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2553
2554 ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
2555 ofs->length = htons(len);
2556 ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
2557 ofs->pad = 0;
2558 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofs->match);
2559 ofs->duration = htonl((time_msec() - rule->created) / 1000);
2560 ofs->priority = htons(rule->cr.priority);
2561 ofs->idle_timeout = htons(rule->idle_timeout);
2562 ofs->hard_timeout = htons(rule->hard_timeout);
959a2ecd 2563 ofs->pad2 = 0;
064af421
BP
2564 ofs->packet_count = htonll(packet_count);
2565 ofs->byte_count = htonll(byte_count);
2566 memcpy(ofs->actions, rule->actions, act_len);
2567}
2568
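/* Returns the set of CLS_INC_* flags that corresponds to OpenFlow table
 * 'table_id': the exact-match table, the wildcard classifier, both (for
 * table id 0xff), or nothing for an unknown table id. */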
2569static int
2570table_id_to_include(uint8_t table_id)
2571{
2572 return (table_id == TABLEID_HASH ? CLS_INC_EXACT
2573 : table_id == TABLEID_CLASSIFIER ? CLS_INC_WILD
2574 : table_id == 0xff ? CLS_INC_ALL
2575 : 0);
2576}
2577
2578static int
2579handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn,
2580 const struct ofp_stats_request *osr,
2581 size_t arg_size)
2582{
2583 struct ofp_flow_stats_request *fsr;
2584 struct flow_stats_cbdata cbdata;
2585 struct cls_rule target;
2586
2587 if (arg_size != sizeof *fsr) {
49bdc010 2588 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
2589 }
2590 fsr = (struct ofp_flow_stats_request *) osr->body;
2591
2592 COVERAGE_INC(ofproto_flows_req);
2593 cbdata.ofproto = p;
2594 cbdata.ofconn = ofconn;
2595 cbdata.out_port = fsr->out_port;
2596 cbdata.msg = start_stats_reply(osr, 1024);
2597 cls_rule_from_match(&target, &fsr->match, 0);
2598 classifier_for_each_match(&p->cls, &target,
2599 table_id_to_include(fsr->table_id),
2600 flow_stats_cb, &cbdata);
2601 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
2602 return 0;
2603}
2604
4f2cad2c
JP
2605struct flow_stats_ds_cbdata {
2606 struct ofproto *ofproto;
2607 struct ds *results;
2608};
2609
2610static void
2611flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
2612{
2613 struct rule *rule = rule_from_cls_rule(rule_);
2614 struct flow_stats_ds_cbdata *cbdata = cbdata_;
2615 struct ds *results = cbdata->results;
2616 struct ofp_match match;
2617 uint64_t packet_count, byte_count;
2618 size_t act_len = sizeof *rule->actions * rule->n_actions;
2619
2620 /* Don't report on subrules. */
2621 if (rule->super != NULL) {
2622 return;
2623 }
2624
2625 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
a26ef517 2626 flow_to_ovs_match(&rule->cr.flow, rule->cr.wc.wildcards, &match);
4f2cad2c
JP
2627
2628 ds_put_format(results, "duration=%llds, ",
2629 (time_msec() - rule->created) / 1000);
52ae00b3 2630 ds_put_format(results, "priority=%u, ", rule->cr.priority);
4f2cad2c
JP
2631 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
2632 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
2633 ofp_print_match(results, &match, true);
2634 ofp_print_actions(results, &rule->actions->header, act_len);
2635 ds_put_cstr(results, "\n");
2636}
2637
2638/* Adds a pretty-printed description of all flows to 'results', including
2639 * those marked hidden by secchan (e.g., by in-band control). */
2640void
2641ofproto_get_all_flows(struct ofproto *p, struct ds *results)
2642{
2643 struct ofp_match match;
2644 struct cls_rule target;
2645 struct flow_stats_ds_cbdata cbdata;
2646
2647 memset(&match, 0, sizeof match);
2648 match.wildcards = htonl(OFPFW_ALL);
2649
2650 cbdata.ofproto = p;
2651 cbdata.results = results;
2652
2653 cls_rule_from_match(&target, &match, 0);
2654 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2655 flow_stats_ds_cb, &cbdata);
2656}
2657
064af421
BP
2658struct aggregate_stats_cbdata {
2659 struct ofproto *ofproto;
2660 uint16_t out_port;
2661 uint64_t packet_count;
2662 uint64_t byte_count;
2663 uint32_t n_flows;
2664};
2665
2666static void
2667aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
2668{
2669 struct rule *rule = rule_from_cls_rule(rule_);
2670 struct aggregate_stats_cbdata *cbdata = cbdata_;
2671 uint64_t packet_count, byte_count;
2672
2673 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2674 return;
2675 }
2676
2677 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2678
2679 cbdata->packet_count += packet_count;
2680 cbdata->byte_count += byte_count;
2681 cbdata->n_flows++;
2682}
2683
2684static int
2685handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn,
2686 const struct ofp_stats_request *osr,
2687 size_t arg_size)
2688{
2689 struct ofp_aggregate_stats_request *asr;
2690 struct ofp_aggregate_stats_reply *reply;
2691 struct aggregate_stats_cbdata cbdata;
2692 struct cls_rule target;
2693 struct ofpbuf *msg;
2694
2695 if (arg_size != sizeof *asr) {
49bdc010 2696 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
2697 }
2698 asr = (struct ofp_aggregate_stats_request *) osr->body;
2699
2700 COVERAGE_INC(ofproto_agg_request);
2701 cbdata.ofproto = p;
2702 cbdata.out_port = asr->out_port;
2703 cbdata.packet_count = 0;
2704 cbdata.byte_count = 0;
2705 cbdata.n_flows = 0;
2706 cls_rule_from_match(&target, &asr->match, 0);
2707 classifier_for_each_match(&p->cls, &target,
2708 table_id_to_include(asr->table_id),
2709 aggregate_stats_cb, &cbdata);
2710
2711 msg = start_stats_reply(osr, sizeof *reply);
2712 reply = append_stats_reply(sizeof *reply, ofconn, &msg);
2713 reply->flow_count = htonl(cbdata.n_flows);
2714 reply->packet_count = htonll(cbdata.packet_count);
2715 reply->byte_count = htonll(cbdata.byte_count);
2716 queue_tx(msg, ofconn, ofconn->reply_counter);
2717 return 0;
2718}
2719
2720static int
2721handle_stats_request(struct ofproto *p, struct ofconn *ofconn,
2722 struct ofp_header *oh)
2723{
2724 struct ofp_stats_request *osr;
2725 size_t arg_size;
2726 int error;
2727
2728 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
2729 1, &arg_size);
2730 if (error) {
2731 return error;
2732 }
2733 osr = (struct ofp_stats_request *) oh;
2734
2735 switch (ntohs(osr->type)) {
2736 case OFPST_DESC:
2737 return handle_desc_stats_request(p, ofconn, osr);
2738
2739 case OFPST_FLOW:
2740 return handle_flow_stats_request(p, ofconn, osr, arg_size);
2741
2742 case OFPST_AGGREGATE:
2743 return handle_aggregate_stats_request(p, ofconn, osr, arg_size);
2744
2745 case OFPST_TABLE:
2746 return handle_table_stats_request(p, ofconn, osr);
2747
2748 case OFPST_PORT:
2749 return handle_port_stats_request(p, ofconn, osr);
2750
2751 case OFPST_VENDOR:
2752 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
2753
2754 default:
2755 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
2756 }
2757}
2758
2759static long long int
2760msec_from_nsec(uint64_t sec, uint32_t nsec)
2761{
2762 return !sec ? 0 : sec * 1000 + nsec / 1000000;
2763}
2764
2765static void
0193b2af
JG
2766update_time(struct ofproto *ofproto, struct rule *rule,
2767 const struct odp_flow_stats *stats)
064af421
BP
2768{
2769 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
2770 if (used > rule->used) {
2771 rule->used = used;
4836f9f2
JP
2772 if (rule->super && used > rule->super->used) {
2773 rule->super->used = used;
2774 }
0193b2af 2775 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
064af421
BP
2776 }
2777}
2778
2779static void
0193b2af
JG
2780update_stats(struct ofproto *ofproto, struct rule *rule,
2781 const struct odp_flow_stats *stats)
064af421 2782{
064af421 2783 if (stats->n_packets) {
0193b2af
JG
2784 update_time(ofproto, rule, stats);
2785 rule->packet_count += stats->n_packets;
2786 rule->byte_count += stats->n_bytes;
2787 netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
2788 stats->tcp_flags);
064af421
BP
2789 }
2790}
2791
2792static int
2793add_flow(struct ofproto *p, struct ofconn *ofconn,
2794 struct ofp_flow_mod *ofm, size_t n_actions)
2795{
2796 struct ofpbuf *packet;
2797 struct rule *rule;
2798 uint16_t in_port;
2799 int error;
2800
49bdc010
JP
2801 if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
2802 flow_t flow;
2803 uint32_t wildcards;
2804
2805 flow_from_match(&flow, &wildcards, &ofm->match);
2806 if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
2807 ntohs(ofm->priority))) {
2808 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
2809 }
2810 }
2811
0193b2af 2812 rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
064af421 2813 n_actions, ntohs(ofm->idle_timeout),
ca069229
JP
2814 ntohs(ofm->hard_timeout),
2815 ofm->flags & htons(OFPFF_SEND_FLOW_REM));
064af421
BP
2816 cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
2817
064af421
BP
2818 error = 0;
2819 if (ofm->buffer_id != htonl(UINT32_MAX)) {
2820 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
2821 &packet, &in_port);
212fe71c
BP
2822 } else {
2823 packet = NULL;
165cd8a3 2824 in_port = UINT16_MAX;
064af421
BP
2825 }
2826
2827 rule_insert(p, rule, packet, in_port);
2828 ofpbuf_delete(packet);
2829 return error;
2830}
2831
2832static int
2833modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
2834 size_t n_actions, uint16_t command, struct rule *rule)
2835{
2836 if (rule_is_hidden(rule)) {
2837 return 0;
2838 }
2839
2840 if (command == OFPFC_DELETE) {
ca069229
JP
2841 long long int now = time_msec();
2842 send_flow_removed(p, rule, now, OFPRR_DELETE);
064af421
BP
2843 rule_remove(p, rule);
2844 } else {
2845 size_t actions_len = n_actions * sizeof *rule->actions;
2846
2847 if (n_actions == rule->n_actions
2848 && !memcmp(ofm->actions, rule->actions, actions_len))
2849 {
2850 return 0;
2851 }
2852
2853 free(rule->actions);
2854 rule->actions = xmemdup(ofm->actions, actions_len);
2855 rule->n_actions = n_actions;
2856
2857 if (rule->cr.wc.wildcards) {
2858 COVERAGE_INC(ofproto_mod_wc_flow);
2859 p->need_revalidate = true;
2860 } else {
2861 rule_update_actions(p, rule);
2862 }
2863 }
2864
2865 return 0;
2866}
2867
2868static int
2869modify_flows_strict(struct ofproto *p, const struct ofp_flow_mod *ofm,
2870 size_t n_actions, uint16_t command)
2871{
2872 struct rule *rule;
2873 uint32_t wildcards;
2874 flow_t flow;
2875
2876 flow_from_match(&flow, &wildcards, &ofm->match);
2877 rule = rule_from_cls_rule(classifier_find_rule_exactly(
2878 &p->cls, &flow, wildcards,
2879 ntohs(ofm->priority)));
2880
2881 if (rule) {
2882 if (command == OFPFC_DELETE
2883 && ofm->out_port != htons(OFPP_NONE)
2884 && !rule_has_out_port(rule, ofm->out_port)) {
2885 return 0;
2886 }
2887
2888 modify_flow(p, ofm, n_actions, command, rule);
2889 }
2890 return 0;
2891}
2892
2893struct modify_flows_cbdata {
2894 struct ofproto *ofproto;
2895 const struct ofp_flow_mod *ofm;
2896 uint16_t out_port;
2897 size_t n_actions;
2898 uint16_t command;
2899};
2900
2901static void
2902modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
2903{
2904 struct rule *rule = rule_from_cls_rule(rule_);
2905 struct modify_flows_cbdata *cbdata = cbdata_;
2906
2907 if (cbdata->out_port != htons(OFPP_NONE)
2908 && !rule_has_out_port(rule, cbdata->out_port)) {
2909 return;
2910 }
2911
2912 modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions,
2913 cbdata->command, rule);
2914}
2915
2916static int
2917modify_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm,
2918 size_t n_actions, uint16_t command)
2919{
2920 struct modify_flows_cbdata cbdata;
2921 struct cls_rule target;
2922
2923 cbdata.ofproto = p;
2924 cbdata.ofm = ofm;
2925 cbdata.out_port = (command == OFPFC_DELETE ? ofm->out_port
2926 : htons(OFPP_NONE));
2927 cbdata.n_actions = n_actions;
2928 cbdata.command = command;
2929
2930 cls_rule_from_match(&target, &ofm->match, 0);
2931
2932 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2933 modify_flows_cb, &cbdata);
2934 return 0;
2935}
2936
2937static int
2938handle_flow_mod(struct ofproto *p, struct ofconn *ofconn,
2939 struct ofp_flow_mod *ofm)
2940{
2941 size_t n_actions;
2942 int error;
2943
2944 error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm,
2945 sizeof *ofm->actions, &n_actions);
2946 if (error) {
2947 return error;
2948 }
2949
49bdc010
JP
2950 /* We do not support the emergency flow cache. It will hopefully
2951 * get dropped from OpenFlow in the near future. */
2952 if (ofm->flags & htons(OFPFF_EMERG)) {
2953 /* There isn't a good fit for an error code, so just state that the
2954 * flow table is full. */
2955 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
2956 }
2957
064af421
BP
2958 normalize_match(&ofm->match);
2959 if (!ofm->match.wildcards) {
2960 ofm->priority = htons(UINT16_MAX);
2961 }
2962
2963 error = validate_actions((const union ofp_action *) ofm->actions,
2964 n_actions, p->max_ports);
2965 if (error) {
2966 return error;
2967 }
2968
2969 switch (ntohs(ofm->command)) {
2970 case OFPFC_ADD:
2971 return add_flow(p, ofconn, ofm, n_actions);
2972
2973 case OFPFC_MODIFY:
2974 return modify_flows_loose(p, ofm, n_actions, OFPFC_MODIFY);
2975
2976 case OFPFC_MODIFY_STRICT:
2977 return modify_flows_strict(p, ofm, n_actions, OFPFC_MODIFY);
2978
2979 case OFPFC_DELETE:
2980 return modify_flows_loose(p, ofm, n_actions, OFPFC_DELETE);
2981
2982 case OFPFC_DELETE_STRICT:
2983 return modify_flows_strict(p, ofm, n_actions, OFPFC_DELETE);
2984
2985 default:
2986 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
2987 }
2988}
2989
064af421
BP
2990static int
2991handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
2992{
2993 struct ofp_vendor_header *ovh = msg;
2994 struct nicira_header *nh;
2995
2996 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
49bdc010 2997 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
2998 }
2999 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
3000 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3001 }
3002 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
49bdc010 3003 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
3004 }
3005
3006 nh = msg;
3007 switch (ntohl(nh->subtype)) {
3008 case NXT_STATUS_REQUEST:
3009 return switch_status_handle_request(p->switch_status, ofconn->rconn,
3010 msg);
064af421
BP
3011 }
3012
3013 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3014}
3015
3016static void
3017handle_openflow(struct ofconn *ofconn, struct ofproto *p,
3018 struct ofpbuf *ofp_msg)
3019{
3020 struct ofp_header *oh = ofp_msg->data;
3021 int error;
3022
3023 COVERAGE_INC(ofproto_recv_openflow);
3024 switch (oh->type) {
3025 case OFPT_ECHO_REQUEST:
3026 error = handle_echo_request(ofconn, oh);
3027 break;
3028
3029 case OFPT_ECHO_REPLY:
3030 error = 0;
3031 break;
3032
3033 case OFPT_FEATURES_REQUEST:
3034 error = handle_features_request(p, ofconn, oh);
3035 break;
3036
3037 case OFPT_GET_CONFIG_REQUEST:
3038 error = handle_get_config_request(p, ofconn, oh);
3039 break;
3040
3041 case OFPT_SET_CONFIG:
3042 error = handle_set_config(p, ofconn, ofp_msg->data);
3043 break;
3044
3045 case OFPT_PACKET_OUT:
3046 error = handle_packet_out(p, ofconn, ofp_msg->data);
3047 break;
3048
3049 case OFPT_PORT_MOD:
3050 error = handle_port_mod(p, oh);
3051 break;
3052
3053 case OFPT_FLOW_MOD:
3054 error = handle_flow_mod(p, ofconn, ofp_msg->data);
3055 break;
3056
3057 case OFPT_STATS_REQUEST:
3058 error = handle_stats_request(p, ofconn, oh);
3059 break;
3060
3061 case OFPT_VENDOR:
3062 error = handle_vendor(p, ofconn, ofp_msg->data);
3063 break;
3064
3065 default:
3066 if (VLOG_IS_WARN_ENABLED()) {
3067 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
3068 VLOG_DBG_RL(&rl, "OpenFlow message ignored: %s", s);
3069 free(s);
3070 }
3071 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
3072 break;
3073 }
3074
3075 if (error) {
3076 send_error_oh(ofconn, ofp_msg->data, error);
3077 }
3078}
3079\f
3080static void
72b06300 3081handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
064af421
BP
3082{
3083 struct odp_msg *msg = packet->data;
3084 uint16_t in_port = odp_port_to_ofp_port(msg->port);
3085 struct rule *rule;
3086 struct ofpbuf payload;
3087 flow_t flow;
3088
064af421
BP
3089 payload.data = msg + 1;
3090 payload.size = msg->length - sizeof *msg;
3091 flow_extract(&payload, msg->port, &flow);
3092
0ad9b732
JP
3093 /* Check with in-band control to see if this packet should be sent
3094 * to the local port regardless of the flow table. */
3095 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
3096 union odp_action action;
3097
3098 memset(&action, 0, sizeof(action));
3099 action.output.type = ODPAT_OUTPUT;
3100 action.output.port = ODPP_LOCAL;
f1acd62b 3101 dpif_execute(p->dpif, flow.in_port, &action, 1, &payload);
0ad9b732
JP
3102 }
3103
064af421
BP
3104 rule = lookup_valid_rule(p, &flow);
3105 if (!rule) {
3106        /* Don't send a packet-in if OFPPC_NO_PACKET_IN is asserted. */
3107 struct ofport *port = port_array_get(&p->ports, msg->port);
3108 if (port) {
3109 if (port->opp.config & OFPPC_NO_PACKET_IN) {
3110 COVERAGE_INC(ofproto_no_packet_in);
3111 /* XXX install 'drop' flow entry */
3112 ofpbuf_delete(packet);
3113 return;
3114 }
3115 } else {
3116 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
3117 }
3118
3119 COVERAGE_INC(ofproto_packet_in);
3120 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3121 return;
3122 }
3123
3124 if (rule->cr.wc.wildcards) {
3125 rule = rule_create_subrule(p, rule, &flow);
3126 rule_make_actions(p, rule, packet);
3127 } else {
3128 if (!rule->may_install) {
3129            /* The rule is not installable, that is, we need to process every
3130             * packet, so process the current packet and compose its actions
3131             * directly into 'rule'. */
3132 rule_make_actions(p, rule, packet);
3133 } else {
3134 /* XXX revalidate rule if it needs it */
3135 }
3136 }
3137
3138 rule_execute(p, rule, &payload, &flow);
3139 rule_reinstall(p, rule);
7778bd15
BP
3140
3141 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY
3142 && rconn_is_connected(p->controller->rconn)) {
3143 /*
3144 * Extra-special case for fail-open mode.
3145 *
3146 * We are in fail-open mode and the packet matched the fail-open rule,
3147 * but we are connected to a controller too. We should send the packet
3148 * up to the controller in the hope that it will try to set up a flow
3149 * and thereby allow us to exit fail-open.
3150 *
3151 * See the top-level comment in fail-open.c for more information.
3152 */
3153 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3154 } else {
3155 ofpbuf_delete(packet);
3156 }
064af421 3157}
72b06300
BP
3158
3159static void
3160handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
3161{
3162 struct odp_msg *msg = packet->data;
3163
3164 switch (msg->type) {
3165 case _ODPL_ACTION_NR:
3166 COVERAGE_INC(ofproto_ctlr_action);
3167 pinsched_send(p->action_sched, odp_port_to_ofp_port(msg->port), packet,
3168 send_packet_in_action, p);
3169 break;
3170
3171 case _ODPL_SFLOW_NR:
3172 if (p->sflow) {
3173 ofproto_sflow_received(p->sflow, msg);
3174 }
3175 ofpbuf_delete(packet);
3176 break;
3177
3178 case _ODPL_MISS_NR:
3179 handle_odp_miss_msg(p, packet);
3180 break;
3181
3182 default:
3183 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
3184 msg->type);
3185 break;
3186 }
3187}
064af421
BP
3188\f
3189static void
3190revalidate_cb(struct cls_rule *sub_, void *cbdata_)
3191{
3192 struct rule *sub = rule_from_cls_rule(sub_);
3193 struct revalidate_cbdata *cbdata = cbdata_;
3194
3195 if (cbdata->revalidate_all
3196 || (cbdata->revalidate_subrules && sub->super)
3197 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
3198 revalidate_rule(cbdata->ofproto, sub);
3199 }
3200}
3201
3202static bool
3203revalidate_rule(struct ofproto *p, struct rule *rule)
3204{
3205 const flow_t *flow = &rule->cr.flow;
3206
3207 COVERAGE_INC(ofproto_revalidate_rule);
3208 if (rule->super) {
3209 struct rule *super;
3210 super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
3211 if (!super) {
3212 rule_remove(p, rule);
3213 return false;
3214 } else if (super != rule->super) {
3215 COVERAGE_INC(ofproto_revalidate_moved);
3216 list_remove(&rule->list);
3217 list_push_back(&super->list, &rule->list);
3218 rule->super = super;
3219 rule->hard_timeout = super->hard_timeout;
3220 rule->idle_timeout = super->idle_timeout;
3221 rule->created = super->created;
3222 rule->used = 0;
3223 }
3224 }
3225
3226 rule_update_actions(p, rule);
3227 return true;
3228}
3229
3230static struct ofpbuf *
ca069229 3231compose_flow_removed(const struct rule *rule, long long int now, uint8_t reason)
064af421 3232{
ca069229 3233 struct ofp_flow_removed *ofr;
064af421
BP
3234 struct ofpbuf *buf;
3235
ca069229
JP
3236 ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
3237 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofr->match);
3238 ofr->priority = htons(rule->cr.priority);
3239 ofr->reason = reason;
3240 ofr->duration = htonl((now - rule->created) / 1000);
3241 ofr->idle_timeout = htons(rule->idle_timeout);
3242 ofr->packet_count = htonll(rule->packet_count);
3243 ofr->byte_count = htonll(rule->byte_count);
064af421
BP
3244
3245 return buf;
3246}
3247
3248static void
ca069229
JP
3249uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
3250{
3251 assert(rule->installed);
3252 assert(!rule->cr.wc.wildcards);
3253
3254 if (rule->super) {
3255 rule_remove(ofproto, rule);
3256 } else {
3257 rule_uninstall(ofproto, rule);
3258 }
3259}
3260static void
3261send_flow_removed(struct ofproto *p, struct rule *rule,
3262 long long int now, uint8_t reason)
064af421
BP
3263{
3264 struct ofconn *ofconn;
3265 struct ofconn *prev;
b9b0ce61 3266 struct ofpbuf *buf = NULL;
064af421
BP
3267
3268    /* We limit the maximum number of queued flow expirations by accounting
3269 * them under the counter for replies. That works because preventing
3270 * OpenFlow requests from being processed also prevents new flows from
3271 * being added (and expiring). (It also prevents processing OpenFlow
3272 * requests that would not add new flows, so it is imperfect.) */
3273
3274 prev = NULL;
3275 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
ca069229 3276 if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)) {
064af421 3277 if (prev) {
431d8ad2 3278 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
064af421 3279 } else {
ca069229 3280 buf = compose_flow_removed(rule, now, reason);
064af421
BP
3281 }
3282 prev = ofconn;
3283 }
3284 }
3285 if (prev) {
431d8ad2 3286 queue_tx(buf, prev, prev->reply_counter);
064af421
BP
3287 }
3288}
3289
064af421
BP
3290
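/* Classifier callback that expires the rule for 'cls_rule' if its hard or
 * idle timeout has passed.  Short of expiration it may still uninstall a
 * datapath flow that has been idle for five seconds, or emit a NetFlow
 * active-timeout record.  On expiration it removes the rule (and any
 * subrules) and, unless the rule is hidden, sends an OFPT_FLOW_REMOVED
 * message to interested controllers. */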
3291static void
3292expire_rule(struct cls_rule *cls_rule, void *p_)
3293{
3294 struct ofproto *p = p_;
3295 struct rule *rule = rule_from_cls_rule(cls_rule);
3296 long long int hard_expire, idle_expire, expire, now;
3297
3298 hard_expire = (rule->hard_timeout
3299 ? rule->created + rule->hard_timeout * 1000
3300 : LLONG_MAX);
3301 idle_expire = (rule->idle_timeout
3302 && (rule->super || list_is_empty(&rule->list))
3303 ? rule->used + rule->idle_timeout * 1000
3304 : LLONG_MAX);
3305 expire = MIN(hard_expire, idle_expire);
064af421
BP
3306
3307 now = time_msec();
3308 if (now < expire) {
3309 if (rule->installed && now >= rule->used + 5000) {
3310 uninstall_idle_flow(p, rule);
0193b2af
JG
3311 } else if (!rule->cr.wc.wildcards) {
3312 active_timeout(p, rule);
064af421 3313 }
0193b2af 3314
064af421
BP
3315 return;
3316 }
3317
3318 COVERAGE_INC(ofproto_expired);
46d6f36f
JG
3319
3320 /* Update stats. This code will be a no-op if the rule expired
3321 * due to an idle timeout. */
064af421 3322 if (rule->cr.wc.wildcards) {
064af421
BP
3323 struct rule *subrule, *next;
3324 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
3325 rule_remove(p, subrule);
3326 }
46d6f36f
JG
3327 } else {
3328 rule_uninstall(p, rule);
064af421
BP
3329 }
3330
8fe1a59d 3331 if (!rule_is_hidden(rule)) {
ca069229
JP
3332 send_flow_removed(p, rule, now,
3333 (now >= hard_expire
3334 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
8fe1a59d 3335 }
064af421
BP
3336 rule_remove(p, rule);
3337}
3338
0193b2af
JG
3339static void
3340active_timeout(struct ofproto *ofproto, struct rule *rule)
3341{
3342 if (ofproto->netflow && !is_controller_rule(rule) &&
3343 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
3344 struct ofexpired expired;
3345 struct odp_flow odp_flow;
3346
3347 /* Get updated flow stats. */
3348 memset(&odp_flow, 0, sizeof odp_flow);
094e1514
JG
3349 if (rule->installed) {
3350 odp_flow.key = rule->cr.flow;
3351 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
d65349ea 3352 dpif_flow_get(ofproto->dpif, &odp_flow);
094e1514
JG
3353
3354 if (odp_flow.stats.n_packets) {
3355 update_time(ofproto, rule, &odp_flow.stats);
3356 netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
3357 odp_flow.stats.tcp_flags);
3358 }
0193b2af
JG
3359 }
3360
3361 expired.flow = rule->cr.flow;
3362 expired.packet_count = rule->packet_count +
3363 odp_flow.stats.n_packets;
3364 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
3365 expired.used = rule->used;
3366
3367 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
3368
3369 /* Schedule us to send the accumulated records once we have
3370 * collected all of them. */
3371 poll_immediate_wake();
3372 }
3373}
3374
064af421
BP
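/* Polls every flow in the datapath to refresh the 'used' time and byte
 * accounting of the corresponding OpenFlow rules.  Datapath flows that no
 * longer correspond to an installed rule are deleted as unexpected. */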
3375static void
3376update_used(struct ofproto *p)
3377{
3378 struct odp_flow *flows;
3379 size_t n_flows;
3380 size_t i;
3381 int error;
3382
c228a364 3383 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
064af421
BP
3384 if (error) {
3385 return;
3386 }
3387
3388 for (i = 0; i < n_flows; i++) {
3389 struct odp_flow *f = &flows[i];
3390 struct rule *rule;
3391
3392 rule = rule_from_cls_rule(
3393 classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
3394 if (!rule || !rule->installed) {
3395 COVERAGE_INC(ofproto_unexpected_rule);
c228a364 3396 dpif_flow_del(p->dpif, f);
064af421
BP
3397 continue;
3398 }
3399
0193b2af 3400 update_time(p, rule, &f->stats);
064af421
BP
3401 rule_account(p, rule, f->stats.n_bytes);
3402 }
3403 free(flows);
3404}
3405
3406static void
3407do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id,
3408 const struct ofpbuf *packet, int send_len)
3409{
372179d4
BP
3410 struct odp_msg *msg = packet->data;
3411 struct ofpbuf payload;
3412 struct ofpbuf *opi;
3413 uint8_t reason;
064af421 3414
372179d4 3415 /* Extract packet payload from 'msg'. */
064af421
BP
3416 payload.data = msg + 1;
3417 payload.size = msg->length - sizeof *msg;
3418
372179d4
BP
3419 /* Construct ofp_packet_in message. */
3420 reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
3421 opi = make_packet_in(buffer_id, odp_port_to_ofp_port(msg->port), reason,
3422 &payload, send_len);
3423
3424 /* Send. */
3425 rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100);
064af421
BP
3426}
3427
3428static void
3429send_packet_in_action(struct ofpbuf *packet, void *p_)
3430{
3431 struct ofproto *p = p_;
3432 struct ofconn *ofconn;
3433 struct odp_msg *msg;
3434
3435 msg = packet->data;
3436 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3437 if (ofconn == p->controller || ofconn->miss_send_len) {
3438 do_send_packet_in(ofconn, UINT32_MAX, packet, msg->arg);
3439 }
3440 }
3441 ofpbuf_delete(packet);
3442}
3443
3444static void
3445send_packet_in_miss(struct ofpbuf *packet, void *p_)
3446{
3447 struct ofproto *p = p_;
7778bd15 3448 bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open);
064af421
BP
3449 struct ofconn *ofconn;
3450 struct ofpbuf payload;
3451 struct odp_msg *msg;
3452
3453 msg = packet->data;
3454 payload.data = msg + 1;
3455 payload.size = msg->length - sizeof *msg;
3456 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3457 if (ofconn->miss_send_len) {
7778bd15
BP
3458 struct pktbuf *pb = ofconn->pktbuf;
3459 uint32_t buffer_id = (in_fail_open
3460 ? pktbuf_get_null()
3461 : pktbuf_save(pb, &payload, msg->port));
064af421
BP
3462 int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len
3463 : UINT32_MAX);
3464 do_send_packet_in(ofconn, buffer_id, packet, send_len);
3465 }
3466 }
3467 ofpbuf_delete(packet);
3468}
3469
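/* Returns the datapath ID to use for 'ofproto': the Ethernet address of its
 * local port, if one is available, otherwise the ofproto's fallback datapath
 * ID. */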
3470static uint64_t
fa60c019 3471pick_datapath_id(const struct ofproto *ofproto)
064af421 3472{
fa60c019 3473 const struct ofport *port;
064af421 3474
fa60c019
BP
3475 port = port_array_get(&ofproto->ports, ODPP_LOCAL);
3476 if (port) {
3477 uint8_t ea[ETH_ADDR_LEN];
3478 int error;
3479
3480 error = netdev_get_etheraddr(port->netdev, ea);
064af421
BP
3481 if (!error) {
3482 return eth_addr_to_uint64(ea);
3483 }
3484 VLOG_WARN("could not get MAC address for %s (%s)",
fa60c019 3485 netdev_get_name(port->netdev), strerror(error));
064af421 3486 }
fa60c019 3487 return ofproto->fallback_dpid;
064af421
BP
3488}
3489
3490static uint64_t
3491pick_fallback_dpid(void)
3492{
3493 uint8_t ea[ETH_ADDR_LEN];
70150daf 3494 eth_addr_nicira_random(ea);
064af421
BP
3495 return eth_addr_to_uint64(ea);
3496}
3497\f
3498static bool
3499default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
3500 struct odp_actions *actions, tag_type *tags,
6a07af36 3501 uint16_t *nf_output_iface, void *ofproto_)
064af421
BP
3502{
3503 struct ofproto *ofproto = ofproto_;
3504 int out_port;
3505
3506 /* Drop frames for reserved multicast addresses. */
3507 if (eth_addr_is_reserved(flow->dl_dst)) {
3508 return true;
3509 }
3510
3511 /* Learn source MAC (but don't try to learn from revalidation). */
3512 if (packet != NULL) {
3513 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
3514 0, flow->in_port);
3515 if (rev_tag) {
3516 /* The log messages here could actually be useful in debugging,
3517 * so keep the rate limit relatively high. */
3518 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
3519 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
3520 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
3521 ofproto_revalidate(ofproto, rev_tag);
3522 }
3523 }
3524
3525 /* Determine output port. */
3526 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
3527 if (out_port < 0) {
6a07af36 3528 add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
064af421
BP
3529 } else if (out_port != flow->in_port) {
3530 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
6a07af36 3531 *nf_output_iface = out_port;
064af421
BP
3532 } else {
3533 /* Drop. */
3534 }
3535
3536 return true;
3537}
3538
3539static const struct ofhooks default_ofhooks = {
3540 NULL,
3541 default_normal_ofhook_cb,
3542 NULL,
3543 NULL
3544};