/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "ofproto.h"
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdlib.h>
#include "classifier.h"
#include "coverage.h"
#include "discovery.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "in-band.h"
#include "mac-learning.h"
#include "netdev.h"
#include "netflow.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "packets.h"
#include "pinsched.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "port-array.h"
#include "rconn.h"
#include "shash.h"
#include "status.h"
#include "stp.h"
#include "stream-ssl.h"
#include "svec.h"
#include "tag.h"
#include "timeval.h"
#include "unixctl.h"
#include "vconn.h"
#include "xtoxll.h"

#define THIS_MODULE VLM_ofproto
#include "vlog.h"

#include "sflow_api.h"

enum {
    TABLEID_HASH = 0,
    TABLEID_CLASSIFIER = 1
};

struct ofport {
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const flow_t *flow, struct ofproto *ofproto,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);

struct rule {
    struct cls_rule cr;

    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    bool send_flow_removed;     /* Send a flow removed message? */
    long long int used;         /* Last-used time (0 if never used). */
    long long int created;      /* Creation time. */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */
    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
    tag_type tags;              /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     * super-rule's list.
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct rule *super;
    struct list list;

    /* OpenFlow actions.
     *
     * A subrule has no actions (it uses the super-rule's actions). */
    int n_actions;
    union ofp_action *actions;

    /* Datapath actions.
     *
     * A super-rule with wildcard fields never has ODP actions (since the
     * datapath only supports exact-match flows). */
    bool installed;             /* Installed in datapath? */
    bool may_install;           /* True ordinarily; false if actions must
                                 * be reassessed for every packet. */
    int n_odp_actions;
    union odp_action *odp_actions;
};
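
/* Illustrative note (editorial, not from the original source): the
 * super/subrule split mirrors the fact that the datapath only handles
 * exact-match flows.  For example, a wildcarded controller flow such as
 * "in_port=1, actions=output:2" stays in the classifier as a super-rule;
 * when a packet arrives on port 1, ofproto builds an exact-match subrule for
 * that packet's full flow key, points the subrule's 'super' at the wildcarded
 * rule, and installs only the subrule in the datapath (see
 * rule_create_subrule() below). */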

static inline bool
rule_is_hidden(const struct rule *rule)
{
    /* Subrules are merely an implementation detail, so hide them from the
     * controller. */
    if (rule->super != NULL) {
        return true;
    }

    /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
     * (e.g. by in-band control) and are intentionally hidden from the
     * controller. */
    if (rule->cr.priority > UINT16_MAX) {
        return true;
    }

    return false;
}

static struct rule *rule_create(struct ofproto *, struct rule *super,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                bool send_flow_removed);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void rule_insert(struct ofproto *, struct rule *,
                        struct ofpbuf *packet, uint16_t in_port);
static void rule_remove(struct ofproto *, struct rule *);
static bool rule_make_actions(struct ofproto *, struct rule *,
                              const struct ofpbuf *packet);
static void rule_install(struct ofproto *, struct rule *,
                         struct rule *displaced_rule);
static void rule_uninstall(struct ofproto *, struct rule *);
static void rule_post_uninstall(struct ofproto *, struct rule *);
static void send_flow_removed(struct ofproto *p, struct rule *rule,
                              long long int now, uint8_t reason);

struct ofconn {
    struct list node;
    struct rconn *rconn;
    struct pktbuf *pktbuf;
    int miss_send_len;

    struct rconn_packet_counter *packet_in_counter;

    /* Number of OpenFlow messages queued as replies to OpenFlow requests, and
     * the maximum number before we stop reading OpenFlow requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;
};
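
/* Illustrative note (editorial, not from the original source): 'reply_counter'
 * provides simple backpressure.  ofconn_run() and ofconn_wait() below only
 * keep reading OpenFlow requests while
 * rconn_packet_counter_read(reply_counter) < OFCONN_REPLY_MAX, so a controller
 * that stops reading its replies eventually stops being serviced instead of
 * causing us to queue replies without bound. */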

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *, struct ofproto *);
static void ofconn_wait(struct ofconn *);
static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *manufacturer;         /* Manufacturer. */
    char *hardware;             /* Hardware. */
    char *software;             /* Software version. */
    char *serial;               /* Serial number. */
    char *dp_desc;              /* Datapath description. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct port_array ports;    /* Index is ODP port nr; ofport->opp.port_no is
                                 * OFP port nr. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct status_category *ss_cat;
    struct in_band *in_band;
    struct discovery *discovery;
    struct fail_open *fail_open;
    struct pinsched *miss_sched, *action_sched;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* Flow table. */
    struct classifier cls;
    bool need_revalidate;
    long long int next_expiration;
    struct tag_set revalidate_set;

    /* OpenFlow connections. */
    struct list all_conns;
    struct ofconn *controller;
    struct pvconn **listeners;
    size_t n_listeners;
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);
static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
static void send_packet_in_action(struct ofpbuf *, void *ofproto);
static void update_used(struct ofproto *);
static void update_stats(struct ofproto *, struct rule *,
                         const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofproto *,
                            struct ofpbuf *);

static void refresh_port_groups(struct ofproto *);

static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->manufacturer = xstrdup("Nicira Networks, Inc.");
    p->hardware = xstrdup("Reference Implementation");
    p->software = xstrdup(VERSION BUILDNR);
    p->serial = xstrdup("None");
    p->dp_desc = xstrdup("None");

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    port_array_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);
    p->in_band = NULL;
    p->discovery = NULL;
    p->fail_open = NULL;
    p->miss_sched = p->action_sched = NULL;
    p->netflow = NULL;
    p->sflow = NULL;

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->need_revalidate = false;
    p->next_expiration = time_msec() + 1000;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    p->controller = ofconn_create(p, rconn_create(5, 8));
    p->controller->pktbuf = pktbuf_create();
    p->controller->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
    p->listeners = NULL;
    p->n_listeners = 0;
    p->snoops = NULL;
    p->n_snoops = 0;

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Register switch status category. */
    p->ss_cat = switch_status_register(p->switch_status, "remote",
                                       rconn_status_cb, p->controller->rconn);

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}
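
/* Illustrative usage sketch (editorial, not from the original source): a
 * caller such as ovs-vswitchd or ovs-openflowd typically drives this module
 * roughly as follows.  The datapath name "dp0" and controller target below
 * are made-up examples:
 *
 *     struct ofproto *ofproto;
 *     int error = ofproto_create("dp0", NULL, NULL, NULL, &ofproto);
 *     if (!error) {
 *         ofproto_set_controller(ofproto, "tcp:10.0.0.1:6633");
 *         for (;;) {
 *             ofproto_run(ofproto);
 *             ofproto_wait(ofproto);
 *             poll_block();
 *         }
 *     }
 */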

void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);
        rconn_reconnect(p->controller->rconn);
    }
}

void
ofproto_set_probe_interval(struct ofproto *p, int probe_interval)
{
    probe_interval = probe_interval ? MAX(probe_interval, 5) : 0;
    rconn_set_probe_interval(p->controller->rconn, probe_interval);
    if (p->fail_open) {
        int trigger_duration = probe_interval ? probe_interval * 3 : 15;
        fail_open_set_trigger_duration(p->fail_open, trigger_duration);
    }
}

void
ofproto_set_max_backoff(struct ofproto *p, int max_backoff)
{
    rconn_set_max_backoff(p->controller->rconn, max_backoff);
}

void
ofproto_set_desc(struct ofproto *p,
                 const char *manufacturer, const char *hardware,
                 const char *software, const char *serial,
                 const char *dp_desc)
{
    if (manufacturer) {
        free(p->manufacturer);
        p->manufacturer = xstrdup(manufacturer);
    }
    if (hardware) {
        free(p->hardware);
        p->hardware = xstrdup(hardware);
    }
    if (software) {
        free(p->software);
        p->software = xstrdup(software);
    }
    if (serial) {
        free(p->serial);
        p->serial = xstrdup(serial);
    }
    if (dp_desc) {
        free(p->dp_desc);
        p->dp_desc = xstrdup(dp_desc);
    }
}

int
ofproto_set_in_band(struct ofproto *p, bool in_band)
{
    if (in_band != (p->in_band != NULL)) {
        if (in_band) {
            return in_band_create(p, p->dpif, p->switch_status,
                                  p->controller->rconn, &p->in_band);
        } else {
            ofproto_set_discovery(p, false, NULL, true);
            in_band_destroy(p->in_band);
            p->in_band = NULL;
        }
        rconn_reconnect(p->controller->rconn);
    }
    return 0;
}

int
ofproto_set_discovery(struct ofproto *p, bool discovery,
                      const char *re, bool update_resolv_conf)
{
    if (discovery != (p->discovery != NULL)) {
        if (discovery) {
            int error = ofproto_set_in_band(p, true);
            if (error) {
                return error;
            }
            error = discovery_create(re, update_resolv_conf,
                                     p->dpif, p->switch_status,
                                     &p->discovery);
            if (error) {
                return error;
            }
        } else {
            discovery_destroy(p->discovery);
            p->discovery = NULL;
        }
        rconn_disconnect(p->controller->rconn);
    } else if (discovery) {
        discovery_set_update_resolv_conf(p->discovery, update_resolv_conf);
        return discovery_set_accept_controller_re(p->discovery, re);
    }
    return 0;
}

int
ofproto_set_controller(struct ofproto *ofproto, const char *controller)
{
    if (ofproto->discovery) {
        return EINVAL;
    } else if (controller) {
        if (strcmp(rconn_get_name(ofproto->controller->rconn), controller)) {
            return rconn_connect(ofproto->controller->rconn, controller);
        } else {
            return 0;
        }
    } else {
        rconn_disconnect(ofproto->controller->rconn);
        return 0;
    }
}

static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
            const struct svec *svec)
{
    struct pvconn **pvconns = *pvconnsp;
    size_t n_pvconns = *n_pvconnsp;
    int retval = 0;
    size_t i;

    for (i = 0; i < n_pvconns; i++) {
        pvconn_close(pvconns[i]);
    }
    free(pvconns);

    pvconns = xmalloc(svec->n * sizeof *pvconns);
    n_pvconns = 0;
    for (i = 0; i < svec->n; i++) {
        const char *name = svec->names[i];
        struct pvconn *pvconn;
        int error;

        error = pvconn_open(name, &pvconn);
        if (!error) {
            pvconns[n_pvconns++] = pvconn;
        } else {
            VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
            if (!retval) {
                retval = error;
            }
        }
    }

    *pvconnsp = pvconns;
    *n_pvconnsp = n_pvconns;

    return retval;
}

int
ofproto_set_listeners(struct ofproto *ofproto, const struct svec *listeners)
{
    return set_pvconns(&ofproto->listeners, &ofproto->n_listeners, listeners);
}

int
ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
{
    return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
}

int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options && nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;
            unsigned int odp_port;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            refresh_port_groups(ofproto);
            PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
                ofproto_sflow_add_port(os, odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

void
ofproto_set_failure(struct ofproto *ofproto, bool fail_open)
{
    if (fail_open) {
        struct rconn *rconn = ofproto->controller->rconn;
        int trigger_duration = rconn_get_probe_interval(rconn) * 3;
        if (!ofproto->fail_open) {
            ofproto->fail_open = fail_open_create(ofproto, trigger_duration,
                                                  ofproto->switch_status,
                                                  rconn);
        } else {
            fail_open_set_trigger_duration(ofproto->fail_open,
                                           trigger_duration);
        }
    } else {
        fail_open_destroy(ofproto->fail_open);
        ofproto->fail_open = NULL;
    }
}

void
ofproto_set_rate_limit(struct ofproto *ofproto,
                       int rate_limit, int burst_limit)
{
    if (rate_limit > 0) {
        if (!ofproto->miss_sched) {
            ofproto->miss_sched = pinsched_create(rate_limit, burst_limit,
                                                  ofproto->switch_status);
            ofproto->action_sched = pinsched_create(rate_limit, burst_limit,
                                                    NULL);
        } else {
            pinsched_set_limits(ofproto->miss_sched, rate_limit, burst_limit);
            pinsched_set_limits(ofproto->action_sched,
                                rate_limit, burst_limit);
        }
    } else {
        pinsched_destroy(ofproto->miss_sched);
        ofproto->miss_sched = NULL;
        pinsched_destroy(ofproto->action_sched);
        ofproto->action_sched = NULL;
    }
}

int
ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
    /* XXX */
    if (enable_stp) {
        VLOG_WARN("STP is not yet implemented");
        return EINVAL;
    } else {
        return 0;
    }
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

int
ofproto_get_probe_interval(const struct ofproto *ofproto)
{
    return rconn_get_probe_interval(ofproto->controller->rconn);
}

int
ofproto_get_max_backoff(const struct ofproto *ofproto)
{
    return rconn_get_max_backoff(ofproto->controller->rconn);
}

bool
ofproto_get_in_band(const struct ofproto *ofproto)
{
    return ofproto->in_band != NULL;
}

bool
ofproto_get_discovery(const struct ofproto *ofproto)
{
    return ofproto->discovery != NULL;
}

const char *
ofproto_get_controller(const struct ofproto *ofproto)
{
    return rconn_get_name(ofproto->controller->rconn);
}

void
ofproto_get_listeners(const struct ofproto *ofproto, struct svec *listeners)
{
    size_t i;

    for (i = 0; i < ofproto->n_listeners; i++) {
        svec_add(listeners, pvconn_get_name(ofproto->listeners[i]));
    }
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

void
ofproto_destroy(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport;
    unsigned int port_no;
    size_t i;

    if (!p) {
        return;
    }

    /* Destroy fail-open early, because it touches the classifier. */
    ofproto_set_failure(p, false);

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_destroy(ofconn);
    }

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    in_band_destroy(p->in_band);
    discovery_destroy(p->discovery);
    pinsched_destroy(p->miss_sched);
    pinsched_destroy(p->action_sched);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    switch_status_unregister(p->ss_cat);

    for (i = 0; i < p->n_listeners; i++) {
        pvconn_close(p->listeners[i]);
    }
    free(p->listeners);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    char *devname;
    int error;
    int i;

    if (shash_is_empty(&p->port_by_name)) {
        init_ports(p);
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;
        int error;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        in_band_run(p->in_band);
    }
    if (p->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(p->controller->rconn)) {
            discovery_question_connectivity(p->discovery);
        }
        if (discovery_run(p->discovery, &controller_name)) {
            if (controller_name) {
                rconn_connect(p->controller->rconn, controller_name);
            } else {
                rconn_disconnect(p->controller->rconn);
            }
        }
    }
    pinsched_run(p->miss_sched, send_packet_in_miss, p);
    pinsched_run(p->action_sched, send_packet_in_action, p);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_run(ofconn, p);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    for (i = 0; i < p->n_listeners; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn);
        if (!retval) {
            ofconn_create(p, rconn_new_from_vconn("passive", vconn));
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            rconn_add_monitor(p->controller->rconn, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        COVERAGE_INC(ofproto_expiration);
        p->next_expiration = time_msec() + 1000;
        update_used(p);

        classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);

        /* Let the hook know that we're at a stable point: all outstanding data
         * in existing flows has been accounted to the account_cb.  Thus, the
         * hook can now reasonably do operations that depend on having accurate
         * flow volume accounting (currently, that's just bond rebalancing). */
        if (p->ofhooks->account_checkpoint_cb) {
            p->ofhooks->account_checkpoint_cb(p->aux);
        }
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

struct revalidate_cbdata {
    struct ofproto *ofproto;
    bool revalidate_all;        /* Revalidate all exact-match rules? */
    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */
};

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    if (p->need_revalidate || revalidate_all
        || !tag_set_is_empty(&p->revalidate_set)) {
        struct revalidate_cbdata cbdata;
        cbdata.ofproto = p;
        cbdata.revalidate_all = revalidate_all;
        cbdata.revalidate_subrules = p->need_revalidate;
        cbdata.revalidate_set = p->revalidate_set;
        tag_set_init(&p->revalidate_set);
        COVERAGE_INC(ofproto_revalidate);
        classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        p->need_revalidate = false;
    }

    return 0;
}

void
ofproto_wait(struct ofproto *p)
{
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        in_band_wait(p->in_band);
    }
    if (p->discovery) {
        discovery_wait(p->discovery);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    pinsched_wait(p->miss_sched);
    pinsched_wait(p->action_sched);
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait(p->next_expiration - time_msec());
    }
    for (i = 0; i < p->n_listeners; i++) {
        pvconn_wait(p->listeners[i]);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return p->discovery || rconn_is_alive(p->controller->rconn);
}

int
ofproto_send_packet(struct ofproto *p, const flow_t *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, flow->in_port, odp_actions.actions,
                 odp_actions.n_actions, packet);
    return 0;
}

void
ofproto_add_flow(struct ofproto *p,
                 const flow_t *flow, uint32_t wildcards, unsigned int priority,
                 const union ofp_action *actions, size_t n_actions,
                 int idle_timeout)
{
    struct rule *rule;
    rule = rule_create(p, NULL, actions, n_actions,
                       idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
                       0, false);
    cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
    rule_insert(p, rule, NULL, 0);
}
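
/* Illustrative usage sketch (editorial, not from the original source): the
 * internal callers that the rule_is_hidden() comment mentions (e.g. in-band
 * control) add flows along roughly these lines.  The match, action, and
 * priority below are made-up examples, not values taken from in-band.c:
 *
 *     union ofp_action action;
 *     flow_t flow;
 *
 *     memset(&action, 0, sizeof action);
 *     action.output.type = htons(OFPAT_OUTPUT);
 *     action.output.len = htons(sizeof action.output);
 *     action.output.port = htons(OFPP_NORMAL);
 *
 *     memset(&flow, 0, sizeof flow);
 *     flow.dl_type = htons(ETH_TYPE_ARP);
 *
 *     ofproto_add_flow(ofproto, &flow, OFPFW_ALL & ~OFPFW_DL_TYPE,
 *                      UINT16_MAX + 1,   // > UINT16_MAX: hidden from the
 *                      &action, 1, 0);   // controller, see rule_is_hidden()
 */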

void
ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
                    uint32_t wildcards, unsigned int priority)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           flow, wildcards,
                                                           priority));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    /* Mark the flow as not installed, even though it might really be
     * installed, so that rule_remove() doesn't bother trying to uninstall it.
     * There is no point in uninstalling it individually since we are about to
     * blow away all the flows with dpif_flow_flush(). */
    rule->installed = false;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}
\f
static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    unsigned int port_no;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    svec_init(&devnames);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        svec_add (&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add (&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

static size_t
refresh_port_group(struct ofproto *p, unsigned int group)
{
    uint16_t *ports;
    size_t n_ports;
    struct ofport *port;
    unsigned int port_no;

    assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);

    ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
    n_ports = 0;
    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
            ports[n_ports++] = port_no;
        }
    }
    dpif_port_group_set(p->dpif, group, ports, n_ports);
    free(ports);

    return n_ports;
}

static void
refresh_port_groups(struct ofproto *p)
{
    size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
    size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
    if (p->sflow) {
        ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
    }
}
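
/* Illustrative note (editorial, not from the original source): these two
 * datapath port groups back flow actions that output to the OpenFlow
 * OFPP_ALL and OFPP_FLOOD pseudo-ports (see add_output_group_action() and
 * the action translation code below).  DP_GROUP_ALL contains every port,
 * while DP_GROUP_FLOOD omits ports whose config has OFPPC_NO_FLOOD set, so
 * the controller's no-flood bits are honored by simply refreshing the group
 * membership whenever the port set changes. */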

static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    bool carrier;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = odp_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
    netdev_options.may_open = true;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (port_array_get(&p->ports, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static int
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
    if (p->ofhooks->port_changed_cb) {
        p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, ofport);
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);

    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, NULL);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_ofport = port_array_get(&p->ports, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);

    /* Update port groups. */
    refresh_port_groups(p);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    refresh_port_groups(p);
    return 0;
}
\f
static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn)
{
    struct ofconn *ofconn = xmalloc(sizeof *ofconn);
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->pktbuf = NULL;
    ofconn->miss_send_len = 0;
    ofconn->packet_in_counter = rconn_packet_counter_create ();
    ofconn->reply_counter = rconn_packet_counter_create ();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn)
{
    list_remove(&ofconn->node);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn, struct ofproto *p)
{
    int iteration;

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, p, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (ofconn != p->controller && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}
\f
/* Caller is responsible for initializing the 'cr' member of the returned
 * rule. */
static struct rule *
rule_create(struct ofproto *ofproto, struct rule *super,
            const union ofp_action *actions, size_t n_actions,
            uint16_t idle_timeout, uint16_t hard_timeout,
            bool send_flow_removed)
{
    struct rule *rule = xzalloc(sizeof *rule);
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    rule->used = rule->created = time_msec();
    rule->send_flow_removed = send_flow_removed;
    rule->super = super;
    if (super) {
        list_push_back(&super->list, &rule->list);
    } else {
        list_init(&rule->list);
    }
    rule->n_actions = n_actions;
    rule->actions = xmemdup(actions, n_actions * sizeof *actions);
    netflow_flow_clear(&rule->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);

    return rule;
}

static struct rule *
rule_from_cls_rule(const struct cls_rule *cls_rule)
{
    return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}

static void
rule_free(struct rule *rule)
{
    free(rule->actions);
    free(rule->odp_actions);
    free(rule);
}

/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer has a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from
 * the classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
static void
rule_destroy(struct ofproto *ofproto, struct rule *rule)
{
    if (!rule->super) {
        struct rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
            revalidate_rule(ofproto, subrule);
        }
    } else {
        list_remove(&rule->list);
    }
    rule_free(rule);
}

static bool
rule_has_out_port(const struct rule *rule, uint16_t out_port)
{
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {
        return true;
    }
    for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
         oa = actions_next(&i)) {
        if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
            return true;
        }
    }
    return false;
}

/* Executes the actions indicated by 'rule' on 'packet', which is in flow
 * 'flow' and is considered to have arrived on ODP port 'in_port'.
 *
 * The flow that 'packet' actually contains does not need to actually match
 * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
 * the packet and byte counters for 'rule' will be credited for the packet sent
 * out whether or not the packet actually matches 'rule'.
 *
 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
 * the caller must already have accurately composed ODP actions for it given
 * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
 * function will compose a set of ODP actions based on 'rule''s OpenFlow
 * actions and apply them to 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule,
             struct ofpbuf *packet, const flow_t *flow)
{
    const union odp_action *actions;
    size_t n_actions;
    struct odp_actions a;

    /* Grab or compose the ODP actions.
     *
     * The special case for an exact-match 'rule' where 'flow' is not the
     * rule's flow is important to avoid, e.g., sending a packet out its input
     * port simply because the ODP actions were composed for the wrong
     * scenario. */
    if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
        struct rule *super = rule->super ? rule->super : rule;
        if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
                          packet, &a, NULL, 0, NULL)) {
            return;
        }
        actions = a.actions;
        n_actions = a.n_actions;
    } else {
        actions = rule->odp_actions;
        n_actions = rule->n_odp_actions;
    }

    /* Execute the ODP actions. */
    if (!dpif_execute(ofproto->dpif, flow->in_port,
                      actions, n_actions, packet)) {
        struct odp_flow_stats stats;
        flow_extract_stats(flow, packet, &stats);
        update_stats(ofproto, rule, &stats);
        rule->used = time_msec();
        netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
    }
}

static void
rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
            uint16_t in_port)
{
    struct rule *displaced_rule;

    /* Insert the rule in the classifier. */
    displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
    if (!rule->cr.wc.wildcards) {
        rule_make_actions(p, rule, packet);
    }

    /* Send the packet and credit it to the rule. */
    if (packet) {
        flow_t flow;
        flow_extract(packet, in_port, &flow);
        rule_execute(p, rule, packet, &flow);
    }

    /* Install the rule in the datapath only after sending the packet, to
     * avoid packet reordering. */
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_add_wc_flow);
        p->need_revalidate = true;
    } else {
        rule_install(p, rule, displaced_rule);
    }

    /* Free the rule that was displaced, if any. */
    if (displaced_rule) {
        rule_destroy(p, displaced_rule);
    }
}

static struct rule *
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
                    const flow_t *flow)
{
    struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
                                       rule->idle_timeout, rule->hard_timeout,
                                       false);
    COVERAGE_INC(ofproto_subrule_create);
    cls_rule_from_flow(&subrule->cr, flow, 0,
                       (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
                        : rule->cr.priority));
    classifier_insert_exact(&ofproto->cls, &subrule->cr);

    return subrule;
}

static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_del_wc_flow);
        ofproto->need_revalidate = true;
    } else {
        rule_uninstall(ofproto, rule);
    }
    classifier_remove(&ofproto->cls, &rule->cr);
    rule_destroy(ofproto, rule);
}

/* Returns true if the actions changed, false otherwise. */
static bool
rule_make_actions(struct ofproto *p, struct rule *rule,
                  const struct ofpbuf *packet)
{
    const struct rule *super;
    struct odp_actions a;
    size_t actions_len;

    assert(!rule->cr.wc.wildcards);

    super = rule->super ? rule->super : rule;
    rule->tags = 0;
    xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
                  packet, &a, &rule->tags, &rule->may_install,
                  &rule->nf_flow.output_iface);

    actions_len = a.n_actions * sizeof *a.actions;
    if (rule->n_odp_actions != a.n_actions
        || memcmp(rule->odp_actions, a.actions, actions_len)) {
        COVERAGE_INC(ofproto_odp_unchanged);
        free(rule->odp_actions);
        rule->n_odp_actions = a.n_actions;
        rule->odp_actions = xmemdup(a.actions, actions_len);
        return true;
    } else {
        return false;
    }
}

static int
do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
            struct odp_flow_put *put)
{
    memset(&put->flow.stats, 0, sizeof put->flow.stats);
    put->flow.key = rule->cr.flow;
    put->flow.actions = rule->odp_actions;
    put->flow.n_actions = rule->n_odp_actions;
    put->flags = flags;
    return dpif_flow_put(ofproto->dpif, put);
}

static void
rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
{
    assert(!rule->cr.wc.wildcards);

    if (rule->may_install) {
        struct odp_flow_put put;
        if (!do_put_flow(p, rule,
                         ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
                         &put)) {
            rule->installed = true;
            if (displaced_rule) {
                update_stats(p, displaced_rule, &put.flow.stats);
                rule_post_uninstall(p, displaced_rule);
            }
        }
    } else if (displaced_rule) {
        rule_uninstall(p, displaced_rule);
    }
}

static void
rule_reinstall(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->installed) {
        struct odp_flow_put put;
        COVERAGE_INC(ofproto_dp_missed);
        do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
    } else {
        rule_install(ofproto, rule, NULL);
    }
}

static void
rule_update_actions(struct ofproto *ofproto, struct rule *rule)
{
    bool actions_changed;
    uint16_t new_out_iface, old_out_iface;

    old_out_iface = rule->nf_flow.output_iface;
    actions_changed = rule_make_actions(ofproto, rule, NULL);

    if (rule->may_install) {
        if (rule->installed) {
            if (actions_changed) {
                struct odp_flow_put put;
                do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
                            | ODPPF_ZERO_STATS, &put);
                update_stats(ofproto, rule, &put.flow.stats);

                /* Temporarily set the old output iface so that NetFlow
                 * messages have the correct output interface for the old
                 * stats. */
                new_out_iface = rule->nf_flow.output_iface;
                rule->nf_flow.output_iface = old_out_iface;
                rule_post_uninstall(ofproto, rule);
                rule->nf_flow.output_iface = new_out_iface;
            }
        } else {
            rule_install(ofproto, rule, NULL);
        }
    } else {
        rule_uninstall(ofproto, rule);
    }
}

static void
rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
{
    uint64_t total_bytes = rule->byte_count + extra_bytes;

    if (ofproto->ofhooks->account_flow_cb
        && total_bytes > rule->accounted_bytes)
    {
        ofproto->ofhooks->account_flow_cb(
            &rule->cr.flow, rule->odp_actions, rule->n_odp_actions,
            total_bytes - rule->accounted_bytes, ofproto->aux);
        rule->accounted_bytes = total_bytes;
    }
}

static void
rule_uninstall(struct ofproto *p, struct rule *rule)
{
    assert(!rule->cr.wc.wildcards);
    if (rule->installed) {
        struct odp_flow odp_flow;

        odp_flow.key = rule->cr.flow;
        odp_flow.actions = NULL;
        odp_flow.n_actions = 0;
        if (!dpif_flow_del(p->dpif, &odp_flow)) {
            update_stats(p, rule, &odp_flow.stats);
        }
        rule->installed = false;

        rule_post_uninstall(p, rule);
    }
}

static bool
is_controller_rule(struct rule *rule)
{
    /* If the only action is send to the controller then don't report
     * NetFlow expiration messages since it is just part of the control
     * logic for the network and not real traffic. */

    if (rule && rule->super) {
        struct rule *super = rule->super;

        return super->n_actions == 1 &&
            super->actions[0].type == htons(OFPAT_OUTPUT) &&
            super->actions[0].output.port == htons(OFPP_CONTROLLER);
    }

    return false;
}

static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
    struct rule *super = rule->super;

    rule_account(ofproto, rule, 0);

    if (ofproto->netflow && !is_controller_rule(rule)) {
        struct ofexpired expired;
        expired.flow = rule->cr.flow;
        expired.packet_count = rule->packet_count;
        expired.byte_count = rule->byte_count;
        expired.used = rule->used;
        netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
    }
    if (super) {
        super->packet_count += rule->packet_count;
        super->byte_count += rule->byte_count;

        /* Reset counters to prevent double counting if the rule ever gets
         * reinstalled. */
        rule->packet_count = 0;
        rule->byte_count = 0;
        rule->accounted_bytes = 0;

        netflow_flow_clear(&rule->nf_flow);
    }
}
\f
1786static void
1787queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
1788 struct rconn_packet_counter *counter)
1789{
1790 update_openflow_length(msg);
1791 if (rconn_send(ofconn->rconn, msg, counter)) {
1792 ofpbuf_delete(msg);
1793 }
1794}
1795
1796static void
1797send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
1798 int error, const void *data, size_t len)
1799{
1800 struct ofpbuf *buf;
1801 struct ofp_error_msg *oem;
1802
1803 if (!(error >> 16)) {
1804 VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
1805 error);
1806 return;
1807 }
1808
1809 COVERAGE_INC(ofproto_error);
1810 oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
1811 oh ? oh->xid : 0, &buf);
1812 oem->type = htons((unsigned int) error >> 16);
1813 oem->code = htons(error & 0xffff);
1814 memcpy(oem->data, data, len);
1815 queue_tx(buf, ofconn, ofconn->reply_counter);
1816}
1817
1818static void
1819send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
1820 int error)
1821{
1822 size_t oh_length = ntohs(oh->length);
1823 send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
1824}
1825
1826static void
1827hton_ofp_phy_port(struct ofp_phy_port *opp)
1828{
1829 opp->port_no = htons(opp->port_no);
1830 opp->config = htonl(opp->config);
1831 opp->state = htonl(opp->state);
1832 opp->curr = htonl(opp->curr);
1833 opp->advertised = htonl(opp->advertised);
1834 opp->supported = htonl(opp->supported);
1835 opp->peer = htonl(opp->peer);
1836}
1837
1838static int
1839handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
1840{
1841 struct ofp_header *rq = oh;
1842 queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
1843 return 0;
1844}
1845
1846static int
1847handle_features_request(struct ofproto *p, struct ofconn *ofconn,
1848 struct ofp_header *oh)
1849{
1850 struct ofp_switch_features *osf;
1851 struct ofpbuf *buf;
1852 unsigned int port_no;
1853 struct ofport *port;
1854
1855 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
1856 osf->datapath_id = htonll(p->datapath_id);
1857 osf->n_buffers = htonl(pktbuf_capacity());
1858 osf->n_tables = 2;
1859 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
1860 OFPC_PORT_STATS | OFPC_MULTI_PHY_TX);
1861 osf->actions = htonl((1u << OFPAT_OUTPUT) |
1862 (1u << OFPAT_SET_VLAN_VID) |
1863 (1u << OFPAT_SET_VLAN_PCP) |
1864 (1u << OFPAT_STRIP_VLAN) |
1865 (1u << OFPAT_SET_DL_SRC) |
1866 (1u << OFPAT_SET_DL_DST) |
1867 (1u << OFPAT_SET_NW_SRC) |
1868 (1u << OFPAT_SET_NW_DST) |
959a2ecd 1869 (1u << OFPAT_SET_NW_TOS) |
1870 (1u << OFPAT_SET_TP_SRC) |
1871 (1u << OFPAT_SET_TP_DST));
1872
1873 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
1874 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
1875 }
1876
1877 queue_tx(buf, ofconn, ofconn->reply_counter);
1878 return 0;
1879}
1880
1881static int
1882handle_get_config_request(struct ofproto *p, struct ofconn *ofconn,
1883 struct ofp_header *oh)
1884{
1885 struct ofpbuf *buf;
1886 struct ofp_switch_config *osc;
1887 uint16_t flags;
1888 bool drop_frags;
1889
1890 /* Figure out flags. */
c228a364 1891 dpif_get_drop_frags(p->dpif, &drop_frags);
064af421 1892 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
1893
1894 /* Send reply. */
1895 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
1896 osc->flags = htons(flags);
1897 osc->miss_send_len = htons(ofconn->miss_send_len);
1898 queue_tx(buf, ofconn, ofconn->reply_counter);
1899
1900 return 0;
1901}
1902
1903static int
1904handle_set_config(struct ofproto *p, struct ofconn *ofconn,
1905 struct ofp_switch_config *osc)
1906{
1907 uint16_t flags;
1908 int error;
1909
1910 error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
1911 if (error) {
1912 return error;
1913 }
1914 flags = ntohs(osc->flags);
1915
1916 if (ofconn == p->controller) {
1917 switch (flags & OFPC_FRAG_MASK) {
1918 case OFPC_FRAG_NORMAL:
c228a364 1919 dpif_set_drop_frags(p->dpif, false);
1920 break;
1921 case OFPC_FRAG_DROP:
c228a364 1922 dpif_set_drop_frags(p->dpif, true);
1923 break;
1924 default:
1925 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
1926 osc->flags);
1927 break;
1928 }
1929 }
1930
1931 if ((ntohs(osc->miss_send_len) != 0) != (ofconn->miss_send_len != 0)) {
1932 if (ntohs(osc->miss_send_len) != 0) {
1933 ofconn->pktbuf = pktbuf_create();
1934 } else {
1935 pktbuf_destroy(ofconn->pktbuf);
1936 }
1937 }
1938
1939 ofconn->miss_send_len = ntohs(osc->miss_send_len);
1940
1941 return 0;
1942}
1943
1944static void
1945add_output_group_action(struct odp_actions *actions, uint16_t group,
1946 uint16_t *nf_output_iface)
1947{
1948 odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
1949
1950 if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
1951 *nf_output_iface = NF_OUT_FLOOD;
1952 }
1953}
1954
1955static void
1956add_controller_action(struct odp_actions *actions,
1957 const struct ofp_action_output *oao)
1958{
1959 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
1960 a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
1961}
1962
1963struct action_xlate_ctx {
1964 /* Input. */
1965 const flow_t *flow; /* Flow to which these actions correspond. */
1966 int recurse; /* Recursion level, via xlate_table_action. */
1967 struct ofproto *ofproto;
1968 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
1969 * null pointer if we are revalidating
1970 * without a packet to refer to. */
1971
1972 /* Output. */
1973 struct odp_actions *out; /* Datapath actions. */
1974 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
d6fbec6d 1975 bool may_set_up_flow; /* True ordinarily; false if the actions must
064af421 1976 * be reassessed for every packet. */
6a07af36 1977 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
1978};
1979
1980static void do_xlate_actions(const union ofp_action *in, size_t n_in,
1981 struct action_xlate_ctx *ctx);
1982
1983static void
1984add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
1985{
1986 const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
1987
1988 if (ofport) {
1989 if (ofport->opp.config & OFPPC_NO_FWD) {
1990 /* Forwarding disabled on port. */
1991 return;
1992 }
1993 } else {
1994 /*
1995 * We don't have an ofport record for this port, but it doesn't hurt to
1996 * allow forwarding to it anyhow. Maybe such a port will appear later
1997 * and we're pre-populating the flow table.
1998 */
064af421 1999 }
2000
2001 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
6a07af36 2002 ctx->nf_output_iface = port;
2003}
2004
2005static struct rule *
2006lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
2007{
2008 struct rule *rule;
2009 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
2010
2011 /* The rule we found might not be valid, since we could be in need of
2012 * revalidation. If it is not valid, don't return it. */
2013 if (rule
2014 && rule->super
2015 && ofproto->need_revalidate
2016 && !revalidate_rule(ofproto, rule)) {
2017 COVERAGE_INC(ofproto_invalidated);
2018 return NULL;
2019 }
2020
2021 return rule;
2022}
2023
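/* Implements OFPP_TABLE output and the Nicira "resubmit" action: looks up
 * 'ctx->flow', with its input port replaced by 'in_port', in the flow table
 * and translates the matching rule's actions in place.  Only a single level
 * of recursion is allowed, which prevents resubmit loops. */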
2024static void
2025xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2026{
2027 if (!ctx->recurse) {
2028 struct rule *rule;
2029 flow_t flow;
2030
2031 flow = *ctx->flow;
2032 flow.in_port = in_port;
2033
2034 rule = lookup_valid_rule(ctx->ofproto, &flow);
2035 if (rule) {
2036 if (rule->super) {
2037 rule = rule->super;
2038 }
2039
2040 ctx->recurse++;
2041 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2042 ctx->recurse--;
2043 }
2044 }
2045}
2046
2047static void
2048xlate_output_action(struct action_xlate_ctx *ctx,
2049 const struct ofp_action_output *oao)
2050{
2051 uint16_t odp_port;
2052 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2053
2054 ctx->nf_output_iface = NF_OUT_DROP;
2055
2056 switch (ntohs(oao->port)) {
2057 case OFPP_IN_PORT:
2058 add_output_action(ctx, ctx->flow->in_port);
2059 break;
2060 case OFPP_TABLE:
2061 xlate_table_action(ctx, ctx->flow->in_port);
2062 break;
2063 case OFPP_NORMAL:
2064 if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet,
2065 ctx->out, ctx->tags,
6a07af36 2066 &ctx->nf_output_iface,
2067 ctx->ofproto->aux)) {
2068 COVERAGE_INC(ofproto_uninstallable);
d6fbec6d 2069 ctx->may_set_up_flow = false;
2070 }
2071 break;
2072 case OFPP_FLOOD:
2073 add_output_group_action(ctx->out, DP_GROUP_FLOOD,
2074 &ctx->nf_output_iface);
2075 break;
2076 case OFPP_ALL:
6a07af36 2077 add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
2078 break;
2079 case OFPP_CONTROLLER:
2080 add_controller_action(ctx->out, oao);
2081 break;
2082 case OFPP_LOCAL:
2083 add_output_action(ctx, ODPP_LOCAL);
2084 break;
2085 default:
2086 odp_port = ofp_port_to_odp_port(ntohs(oao->port));
2087 if (odp_port != ctx->flow->in_port) {
2088 add_output_action(ctx, odp_port);
2089 }
2090 break;
2091 }
2092
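    /* Reconcile the NetFlow output interface for this action with whatever
     * was recorded before it: a flood always stays a flood, a single output
     * port is reported as-is, and two or more distinct outputs collapse to
     * NF_OUT_MULTI. */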
2093 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2094 ctx->nf_output_iface = NF_OUT_FLOOD;
2095 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2096 ctx->nf_output_iface = prev_nf_output_iface;
2097 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2098 ctx->nf_output_iface != NF_OUT_FLOOD) {
2099 ctx->nf_output_iface = NF_OUT_MULTI;
2100 }
2101}
2102
2103static void
2104xlate_nicira_action(struct action_xlate_ctx *ctx,
2105 const struct nx_action_header *nah)
2106{
2107 const struct nx_action_resubmit *nar;
2108 int subtype = ntohs(nah->subtype);
2109
2110 assert(nah->vendor == htonl(NX_VENDOR_ID));
2111 switch (subtype) {
2112 case NXAST_RESUBMIT:
2113 nar = (const struct nx_action_resubmit *) nah;
2114 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2115 break;
2116
2117 default:
2118 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2119 break;
2120 }
2121}
2122
2123static void
2124do_xlate_actions(const union ofp_action *in, size_t n_in,
2125 struct action_xlate_ctx *ctx)
2126{
2127 struct actions_iterator iter;
2128 const union ofp_action *ia;
2129 const struct ofport *port;
2130
2131 port = port_array_get(&ctx->ofproto->ports, ctx->flow->in_port);
2132 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2133 port->opp.config & (eth_addr_equals(ctx->flow->dl_dst, stp_eth_addr)
2134 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2135 /* Drop this flow. */
2136 return;
2137 }
2138
2139 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2140 uint16_t type = ntohs(ia->type);
2141 union odp_action *oa;
2142
2143 switch (type) {
2144 case OFPAT_OUTPUT:
2145 xlate_output_action(ctx, &ia->output);
2146 break;
2147
2148 case OFPAT_SET_VLAN_VID:
2149 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID);
2150 oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid;
2151 break;
2152
2153 case OFPAT_SET_VLAN_PCP:
2154 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP);
2155 oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp;
2156 break;
2157
2158 case OFPAT_STRIP_VLAN:
2159 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2160 break;
2161
2162 case OFPAT_SET_DL_SRC:
2163 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2164 memcpy(oa->dl_addr.dl_addr,
2165 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2166 break;
2167
2168 case OFPAT_SET_DL_DST:
2169 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2170 memcpy(oa->dl_addr.dl_addr,
2171 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2172 break;
2173
2174 case OFPAT_SET_NW_SRC:
2175 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2176 oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2177 break;
2178
2179 case OFPAT_SET_NW_DST:
2180 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
 2181 oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
      break;
 2182
2183 case OFPAT_SET_NW_TOS:
2184 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
2185 oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2186 break;
2187
2188 case OFPAT_SET_TP_SRC:
2189 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2190 oa->tp_port.tp_port = ia->tp_port.tp_port;
2191 break;
2192
2193 case OFPAT_SET_TP_DST:
2194 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2195 oa->tp_port.tp_port = ia->tp_port.tp_port;
2196 break;
2197
2198 case OFPAT_VENDOR:
2199 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2200 break;
2201
2202 default:
2203 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2204 break;
2205 }
2206 }
2207}
2208
2209static int
2210xlate_actions(const union ofp_action *in, size_t n_in,
2211 const flow_t *flow, struct ofproto *ofproto,
2212 const struct ofpbuf *packet,
2213 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2214 uint16_t *nf_output_iface)
2215{
2216 tag_type no_tags = 0;
2217 struct action_xlate_ctx ctx;
2218 COVERAGE_INC(ofproto_ofp2odp);
2219 odp_actions_init(out);
2220 ctx.flow = flow;
2221 ctx.recurse = 0;
2222 ctx.ofproto = ofproto;
2223 ctx.packet = packet;
2224 ctx.out = out;
2225 ctx.tags = tags ? tags : &no_tags;
d6fbec6d 2226 ctx.may_set_up_flow = true;
6a07af36 2227 ctx.nf_output_iface = NF_OUT_DROP;
064af421 2228 do_xlate_actions(in, n_in, &ctx);
0ad9b732 2229
d6fbec6d 2230 /* Check with in-band control to see if we're allowed to set up this
2231 * flow. */
2232 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
d6fbec6d 2233 ctx.may_set_up_flow = false;
2234 }
2235
2236 if (may_set_up_flow) {
2237 *may_set_up_flow = ctx.may_set_up_flow;
064af421 2238 }
2239 if (nf_output_iface) {
2240 *nf_output_iface = ctx.nf_output_iface;
2241 }
2242 if (odp_actions_overflow(out)) {
2243 odp_actions_init(out);
2244 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2245 }
2246 return 0;
2247}
2248
2249static int
2250handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
2251 struct ofp_header *oh)
2252{
2253 struct ofp_packet_out *opo;
2254 struct ofpbuf payload, *buffer;
2255 struct odp_actions actions;
2256 int n_actions;
2257 uint16_t in_port;
2258 flow_t flow;
2259 int error;
2260
2261 error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports);
2262 if (error) {
2263 return error;
2264 }
2265 opo = (struct ofp_packet_out *) oh;
2266
2267 COVERAGE_INC(ofproto_packet_out);
2268 if (opo->buffer_id != htonl(UINT32_MAX)) {
2269 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2270 &buffer, &in_port);
7778bd15 2271 if (error || !buffer) {
2272 return error;
2273 }
2274 payload = *buffer;
2275 } else {
2276 buffer = NULL;
2277 }
2278
2279 flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
2280 error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
6a07af36 2281 &flow, p, &payload, &actions, NULL, NULL, NULL);
2282 if (error) {
2283 return error;
2284 }
2285
c228a364 2286 dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions,
2287 &payload);
2288 ofpbuf_delete(buffer);
2289
2290 return 0;
2291}
2292
2293static void
2294update_port_config(struct ofproto *p, struct ofport *port,
2295 uint32_t config, uint32_t mask)
2296{
2297 mask &= config ^ port->opp.config;
2298 if (mask & OFPPC_PORT_DOWN) {
2299 if (config & OFPPC_PORT_DOWN) {
2300 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
2301 } else {
2302 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
2303 }
2304 }
2305#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
2306 if (mask & REVALIDATE_BITS) {
2307 COVERAGE_INC(ofproto_costly_flags);
2308 port->opp.config ^= mask & REVALIDATE_BITS;
2309 p->need_revalidate = true;
2310 }
2311#undef REVALIDATE_BITS
2312 if (mask & OFPPC_NO_FLOOD) {
2313 port->opp.config ^= OFPPC_NO_FLOOD;
72b06300 2314 refresh_port_groups(p);
2315 }
2316 if (mask & OFPPC_NO_PACKET_IN) {
2317 port->opp.config ^= OFPPC_NO_PACKET_IN;
2318 }
2319}
2320
2321static int
2322handle_port_mod(struct ofproto *p, struct ofp_header *oh)
2323{
2324 const struct ofp_port_mod *opm;
2325 struct ofport *port;
2326 int error;
2327
2328 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
2329 if (error) {
2330 return error;
2331 }
2332 opm = (struct ofp_port_mod *) oh;
2333
2334 port = port_array_get(&p->ports,
2335 ofp_port_to_odp_port(ntohs(opm->port_no)));
2336 if (!port) {
2337 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
2338 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
2339 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
2340 } else {
2341 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
2342 if (opm->advertise) {
2343 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
2344 }
2345 }
2346 return 0;
2347}
2348
2349static struct ofpbuf *
2350make_stats_reply(uint32_t xid, uint16_t type, size_t body_len)
2351{
2352 struct ofp_stats_reply *osr;
2353 struct ofpbuf *msg;
2354
2355 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
2356 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
2357 osr->type = type;
2358 osr->flags = htons(0);
2359 return msg;
2360}
2361
2362static struct ofpbuf *
2363start_stats_reply(const struct ofp_stats_request *request, size_t body_len)
2364{
2365 return make_stats_reply(request->header.xid, request->type, body_len);
2366}
2367
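/* Reserves 'nbytes' of space for another entry in the stats reply '*msgp'.
 * If that would push the message past the 64 kB OpenFlow limit, the current
 * reply is sent with OFPSF_REPLY_MORE set and a fresh reply buffer replaces
 * '*msgp'.  A typical caller looks like this sketch:
 *
 *     struct ofp_port_stats *ops;
 *     ops = append_stats_reply(sizeof *ops, ofconn, &msg);
 *     ops->port_no = htons(...);
 */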
2368static void *
2369append_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofpbuf **msgp)
2370{
2371 struct ofpbuf *msg = *msgp;
2372 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
2373 if (nbytes + msg->size > UINT16_MAX) {
2374 struct ofp_stats_reply *reply = msg->data;
2375 reply->flags = htons(OFPSF_REPLY_MORE);
2376 *msgp = make_stats_reply(reply->header.xid, reply->type, nbytes);
2377 queue_tx(msg, ofconn, ofconn->reply_counter);
2378 }
2379 return ofpbuf_put_uninit(*msgp, nbytes);
2380}
2381
2382static int
2383handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn,
2384 struct ofp_stats_request *request)
2385{
2386 struct ofp_desc_stats *ods;
2387 struct ofpbuf *msg;
2388
2389 msg = start_stats_reply(request, sizeof *ods);
2390 ods = append_stats_reply(sizeof *ods, ofconn, &msg);
2391 strncpy(ods->mfr_desc, p->manufacturer, sizeof ods->mfr_desc);
2392 strncpy(ods->hw_desc, p->hardware, sizeof ods->hw_desc);
2393 strncpy(ods->sw_desc, p->software, sizeof ods->sw_desc);
2394 strncpy(ods->serial_num, p->serial, sizeof ods->serial_num);
8abc4ed7 2395 strncpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
2396 queue_tx(msg, ofconn, ofconn->reply_counter);
2397
2398 return 0;
2399}
2400
2401static void
2402count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
2403{
2404 struct rule *rule = rule_from_cls_rule(cls_rule);
2405 int *n_subrules = n_subrules_;
2406
2407 if (rule->super) {
2408 (*n_subrules)++;
2409 }
2410}
2411
2412static int
2413handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
2414 struct ofp_stats_request *request)
2415{
2416 struct ofp_table_stats *ots;
2417 struct ofpbuf *msg;
2418 struct odp_stats dpstats;
2419 int n_exact, n_subrules, n_wild;
2420
2421 msg = start_stats_reply(request, sizeof *ots * 2);
2422
2423 /* Count rules of various kinds. */
2424 n_subrules = 0;
2425 classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
2426 n_exact = classifier_count_exact(&p->cls) - n_subrules;
2427 n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
2428
2429 /* Hash table. */
c228a364 2430 dpif_get_dp_stats(p->dpif, &dpstats);
2431 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2432 memset(ots, 0, sizeof *ots);
2433 ots->table_id = TABLEID_HASH;
2434 strcpy(ots->name, "hash");
2435 ots->wildcards = htonl(0);
2436 ots->max_entries = htonl(dpstats.max_capacity);
2437 ots->active_count = htonl(n_exact);
2438 ots->lookup_count = htonll(dpstats.n_frags + dpstats.n_hit +
2439 dpstats.n_missed);
2440 ots->matched_count = htonll(dpstats.n_hit); /* XXX */
2441
2442 /* Classifier table. */
2443 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2444 memset(ots, 0, sizeof *ots);
2445 ots->table_id = TABLEID_CLASSIFIER;
2446 strcpy(ots->name, "classifier");
2447 ots->wildcards = htonl(OFPFW_ALL);
2448 ots->max_entries = htonl(65536);
2449 ots->active_count = htonl(n_wild);
2450 ots->lookup_count = htonll(0); /* XXX */
2451 ots->matched_count = htonll(0); /* XXX */
2452
2453 queue_tx(msg, ofconn, ofconn->reply_counter);
2454 return 0;
2455}
2456
2457static int
2458handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn,
2459 struct ofp_stats_request *request)
2460{
2461 struct ofp_port_stats *ops;
2462 struct ofpbuf *msg;
2463 struct ofport *port;
2464 unsigned int port_no;
2465
2466 msg = start_stats_reply(request, sizeof *ops * 16);
2467 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
2468 struct netdev_stats stats;
2469
2470 /* Intentionally ignore return value, since errors will set 'stats' to
2471 * all-1s, which is correct for OpenFlow, and netdev_get_stats() will
2472 * log errors. */
2473 netdev_get_stats(port->netdev, &stats);
2474
2475 ops = append_stats_reply(sizeof *ops, ofconn, &msg);
2476 ops->port_no = htons(odp_port_to_ofp_port(port_no));
2477 memset(ops->pad, 0, sizeof ops->pad);
2478 ops->rx_packets = htonll(stats.rx_packets);
2479 ops->tx_packets = htonll(stats.tx_packets);
2480 ops->rx_bytes = htonll(stats.rx_bytes);
2481 ops->tx_bytes = htonll(stats.tx_bytes);
2482 ops->rx_dropped = htonll(stats.rx_dropped);
2483 ops->tx_dropped = htonll(stats.tx_dropped);
2484 ops->rx_errors = htonll(stats.rx_errors);
2485 ops->tx_errors = htonll(stats.tx_errors);
2486 ops->rx_frame_err = htonll(stats.rx_frame_errors);
2487 ops->rx_over_err = htonll(stats.rx_over_errors);
2488 ops->rx_crc_err = htonll(stats.rx_crc_errors);
2489 ops->collisions = htonll(stats.collisions);
2490 }
2491
2492 queue_tx(msg, ofconn, ofconn->reply_counter);
2493 return 0;
2494}
2495
2496struct flow_stats_cbdata {
2497 struct ofproto *ofproto;
2498 struct ofconn *ofconn;
2499 uint16_t out_port;
2500 struct ofpbuf *msg;
2501};
2502
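/* Obtains the packet and byte counts for 'rule': the counters already
 * accumulated in 'rule' (and, for a wildcarded rule, in each of its
 * subrules), plus whatever the datapath currently reports for the
 * corresponding exact-match flows. */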
2503static void
2504query_stats(struct ofproto *p, struct rule *rule,
2505 uint64_t *packet_countp, uint64_t *byte_countp)
2506{
2507 uint64_t packet_count, byte_count;
2508 struct rule *subrule;
2509 struct odp_flow *odp_flows;
2510 size_t n_odp_flows;
2511
2512 packet_count = rule->packet_count;
2513 byte_count = rule->byte_count;
2514
064af421 2515 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
ec6fde61 2516 odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
2517 if (rule->cr.wc.wildcards) {
2518 size_t i = 0;
2519 LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
2520 odp_flows[i++].key = subrule->cr.flow;
2521 packet_count += subrule->packet_count;
2522 byte_count += subrule->byte_count;
2523 }
2524 } else {
2525 odp_flows[0].key = rule->cr.flow;
2526 }
2527
c228a364 2530 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
2531 size_t i;
2532 for (i = 0; i < n_odp_flows; i++) {
2533 struct odp_flow *odp_flow = &odp_flows[i];
2534 packet_count += odp_flow->stats.n_packets;
2535 byte_count += odp_flow->stats.n_bytes;
2536 }
2537 }
2538 free(odp_flows);
2539
2540 *packet_countp = packet_count;
2541 *byte_countp = byte_count;
2542}
2543
2544static void
2545flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
2546{
2547 struct rule *rule = rule_from_cls_rule(rule_);
2548 struct flow_stats_cbdata *cbdata = cbdata_;
2549 struct ofp_flow_stats *ofs;
2550 uint64_t packet_count, byte_count;
2551 size_t act_len, len;
2552
2553 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2554 return;
2555 }
2556
2557 act_len = sizeof *rule->actions * rule->n_actions;
2558 len = offsetof(struct ofp_flow_stats, actions) + act_len;
2559
2560 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2561
2562 ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
2563 ofs->length = htons(len);
2564 ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
2565 ofs->pad = 0;
2566 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofs->match);
2567 ofs->duration = htonl((time_msec() - rule->created) / 1000);
2568 ofs->priority = htons(rule->cr.priority);
2569 ofs->idle_timeout = htons(rule->idle_timeout);
2570 ofs->hard_timeout = htons(rule->hard_timeout);
959a2ecd 2571 ofs->pad2 = 0;
2572 ofs->packet_count = htonll(packet_count);
2573 ofs->byte_count = htonll(byte_count);
2574 memcpy(ofs->actions, rule->actions, act_len);
2575}
2576
2577static int
2578table_id_to_include(uint8_t table_id)
2579{
2580 return (table_id == TABLEID_HASH ? CLS_INC_EXACT
2581 : table_id == TABLEID_CLASSIFIER ? CLS_INC_WILD
2582 : table_id == 0xff ? CLS_INC_ALL
2583 : 0);
2584}
2585
2586static int
2587handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn,
2588 const struct ofp_stats_request *osr,
2589 size_t arg_size)
2590{
2591 struct ofp_flow_stats_request *fsr;
2592 struct flow_stats_cbdata cbdata;
2593 struct cls_rule target;
2594
2595 if (arg_size != sizeof *fsr) {
49bdc010 2596 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2597 }
2598 fsr = (struct ofp_flow_stats_request *) osr->body;
2599
2600 COVERAGE_INC(ofproto_flows_req);
2601 cbdata.ofproto = p;
2602 cbdata.ofconn = ofconn;
2603 cbdata.out_port = fsr->out_port;
2604 cbdata.msg = start_stats_reply(osr, 1024);
2605 cls_rule_from_match(&target, &fsr->match, 0);
2606 classifier_for_each_match(&p->cls, &target,
2607 table_id_to_include(fsr->table_id),
2608 flow_stats_cb, &cbdata);
2609 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
2610 return 0;
2611}
2612
2613struct flow_stats_ds_cbdata {
2614 struct ofproto *ofproto;
2615 struct ds *results;
2616};
2617
2618static void
2619flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
2620{
2621 struct rule *rule = rule_from_cls_rule(rule_);
2622 struct flow_stats_ds_cbdata *cbdata = cbdata_;
2623 struct ds *results = cbdata->results;
2624 struct ofp_match match;
2625 uint64_t packet_count, byte_count;
2626 size_t act_len = sizeof *rule->actions * rule->n_actions;
2627
2628 /* Don't report on subrules. */
2629 if (rule->super != NULL) {
2630 return;
2631 }
2632
2633 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
a26ef517 2634 flow_to_ovs_match(&rule->cr.flow, rule->cr.wc.wildcards, &match);
2635
2636 ds_put_format(results, "duration=%llds, ",
2637 (time_msec() - rule->created) / 1000);
52ae00b3 2638 ds_put_format(results, "priority=%u, ", rule->cr.priority);
2639 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
2640 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
2641 ofp_print_match(results, &match, true);
2642 ofp_print_actions(results, &rule->actions->header, act_len);
2643 ds_put_cstr(results, "\n");
2644}
2645
2646/* Adds a pretty-printed description of all flows to 'results', including
2647 * those marked hidden by secchan (e.g., by in-band control). */
2648void
2649ofproto_get_all_flows(struct ofproto *p, struct ds *results)
2650{
2651 struct ofp_match match;
2652 struct cls_rule target;
2653 struct flow_stats_ds_cbdata cbdata;
2654
2655 memset(&match, 0, sizeof match);
2656 match.wildcards = htonl(OFPFW_ALL);
2657
2658 cbdata.ofproto = p;
2659 cbdata.results = results;
2660
2661 cls_rule_from_match(&target, &match, 0);
2662 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2663 flow_stats_ds_cb, &cbdata);
2664}
2665
2666struct aggregate_stats_cbdata {
2667 struct ofproto *ofproto;
2668 uint16_t out_port;
2669 uint64_t packet_count;
2670 uint64_t byte_count;
2671 uint32_t n_flows;
2672};
2673
2674static void
2675aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
2676{
2677 struct rule *rule = rule_from_cls_rule(rule_);
2678 struct aggregate_stats_cbdata *cbdata = cbdata_;
2679 uint64_t packet_count, byte_count;
2680
2681 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2682 return;
2683 }
2684
2685 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2686
2687 cbdata->packet_count += packet_count;
2688 cbdata->byte_count += byte_count;
2689 cbdata->n_flows++;
2690}
2691
2692static int
2693handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn,
2694 const struct ofp_stats_request *osr,
2695 size_t arg_size)
2696{
2697 struct ofp_aggregate_stats_request *asr;
2698 struct ofp_aggregate_stats_reply *reply;
2699 struct aggregate_stats_cbdata cbdata;
2700 struct cls_rule target;
2701 struct ofpbuf *msg;
2702
2703 if (arg_size != sizeof *asr) {
49bdc010 2704 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2705 }
2706 asr = (struct ofp_aggregate_stats_request *) osr->body;
2707
2708 COVERAGE_INC(ofproto_agg_request);
2709 cbdata.ofproto = p;
2710 cbdata.out_port = asr->out_port;
2711 cbdata.packet_count = 0;
2712 cbdata.byte_count = 0;
2713 cbdata.n_flows = 0;
2714 cls_rule_from_match(&target, &asr->match, 0);
2715 classifier_for_each_match(&p->cls, &target,
2716 table_id_to_include(asr->table_id),
2717 aggregate_stats_cb, &cbdata);
2718
2719 msg = start_stats_reply(osr, sizeof *reply);
2720 reply = append_stats_reply(sizeof *reply, ofconn, &msg);
2721 reply->flow_count = htonl(cbdata.n_flows);
2722 reply->packet_count = htonll(cbdata.packet_count);
2723 reply->byte_count = htonll(cbdata.byte_count);
2724 queue_tx(msg, ofconn, ofconn->reply_counter);
2725 return 0;
2726}
2727
2728static int
2729handle_stats_request(struct ofproto *p, struct ofconn *ofconn,
2730 struct ofp_header *oh)
2731{
2732 struct ofp_stats_request *osr;
2733 size_t arg_size;
2734 int error;
2735
2736 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
2737 1, &arg_size);
2738 if (error) {
2739 return error;
2740 }
2741 osr = (struct ofp_stats_request *) oh;
2742
2743 switch (ntohs(osr->type)) {
2744 case OFPST_DESC:
2745 return handle_desc_stats_request(p, ofconn, osr);
2746
2747 case OFPST_FLOW:
2748 return handle_flow_stats_request(p, ofconn, osr, arg_size);
2749
2750 case OFPST_AGGREGATE:
2751 return handle_aggregate_stats_request(p, ofconn, osr, arg_size);
2752
2753 case OFPST_TABLE:
2754 return handle_table_stats_request(p, ofconn, osr);
2755
2756 case OFPST_PORT:
2757 return handle_port_stats_request(p, ofconn, osr);
2758
2759 case OFPST_VENDOR:
2760 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
2761
2762 default:
2763 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
2764 }
2765}
2766
2767static long long int
2768msec_from_nsec(uint64_t sec, uint32_t nsec)
2769{
2770 return !sec ? 0 : sec * 1000 + nsec / 1000000;
2771}
2772
2773static void
2774update_time(struct ofproto *ofproto, struct rule *rule,
2775 const struct odp_flow_stats *stats)
2776{
2777 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
2778 if (used > rule->used) {
2779 rule->used = used;
2780 if (rule->super && used > rule->super->used) {
2781 rule->super->used = used;
2782 }
0193b2af 2783 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
2784 }
2785}
2786
2787static void
2788update_stats(struct ofproto *ofproto, struct rule *rule,
2789 const struct odp_flow_stats *stats)
064af421 2790{
064af421 2791 if (stats->n_packets) {
2792 update_time(ofproto, rule, stats);
2793 rule->packet_count += stats->n_packets;
2794 rule->byte_count += stats->n_bytes;
2795 netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
2796 stats->tcp_flags);
2797 }
2798}
2799
2800static int
2801add_flow(struct ofproto *p, struct ofconn *ofconn,
2802 struct ofp_flow_mod *ofm, size_t n_actions)
2803{
2804 struct ofpbuf *packet;
2805 struct rule *rule;
2806 uint16_t in_port;
2807 int error;
2808
2809 if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
2810 flow_t flow;
2811 uint32_t wildcards;
2812
2813 flow_from_match(&flow, &wildcards, &ofm->match);
2814 if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
2815 ntohs(ofm->priority))) {
2816 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
2817 }
2818 }
2819
0193b2af 2820 rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
064af421 2821 n_actions, ntohs(ofm->idle_timeout),
2822 ntohs(ofm->hard_timeout),
2823 ofm->flags & htons(OFPFF_SEND_FLOW_REM));
2824 cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
2825
2826 error = 0;
2827 if (ofm->buffer_id != htonl(UINT32_MAX)) {
2828 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
2829 &packet, &in_port);
2830 } else {
2831 packet = NULL;
165cd8a3 2832 in_port = UINT16_MAX;
2833 }
2834
2835 rule_insert(p, rule, packet, in_port);
2836 ofpbuf_delete(packet);
2837 return error;
2838}
2839
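/* Implements the core of OFPFC_MODIFY and OFPFC_DELETE for a single 'rule'.
 * Hidden rules are left alone.  Deletion sends a flow-removed notification
 * (to controllers that requested one) before removing the rule; modification
 * replaces the rule's action list and then either marks the flow table for
 * revalidation (wildcarded rule) or reinstalls the rule's datapath actions
 * (exact-match rule). */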
2840static int
2841modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
2842 size_t n_actions, uint16_t command, struct rule *rule)
2843{
2844 if (rule_is_hidden(rule)) {
2845 return 0;
2846 }
2847
2848 if (command == OFPFC_DELETE) {
2849 long long int now = time_msec();
2850 send_flow_removed(p, rule, now, OFPRR_DELETE);
2851 rule_remove(p, rule);
2852 } else {
2853 size_t actions_len = n_actions * sizeof *rule->actions;
2854
2855 if (n_actions == rule->n_actions
2856 && !memcmp(ofm->actions, rule->actions, actions_len))
2857 {
2858 return 0;
2859 }
2860
2861 free(rule->actions);
2862 rule->actions = xmemdup(ofm->actions, actions_len);
2863 rule->n_actions = n_actions;
2864
2865 if (rule->cr.wc.wildcards) {
2866 COVERAGE_INC(ofproto_mod_wc_flow);
2867 p->need_revalidate = true;
2868 } else {
2869 rule_update_actions(p, rule);
2870 }
2871 }
2872
2873 return 0;
2874}
2875
2876static int
2877modify_flows_strict(struct ofproto *p, const struct ofp_flow_mod *ofm,
2878 size_t n_actions, uint16_t command)
2879{
2880 struct rule *rule;
2881 uint32_t wildcards;
2882 flow_t flow;
2883
2884 flow_from_match(&flow, &wildcards, &ofm->match);
2885 rule = rule_from_cls_rule(classifier_find_rule_exactly(
2886 &p->cls, &flow, wildcards,
2887 ntohs(ofm->priority)));
2888
2889 if (rule) {
2890 if (command == OFPFC_DELETE
2891 && ofm->out_port != htons(OFPP_NONE)
2892 && !rule_has_out_port(rule, ofm->out_port)) {
2893 return 0;
2894 }
2895
2896 modify_flow(p, ofm, n_actions, command, rule);
2897 }
2898 return 0;
2899}
2900
2901struct modify_flows_cbdata {
2902 struct ofproto *ofproto;
2903 const struct ofp_flow_mod *ofm;
2904 uint16_t out_port;
2905 size_t n_actions;
2906 uint16_t command;
2907};
2908
2909static void
2910modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
2911{
2912 struct rule *rule = rule_from_cls_rule(rule_);
2913 struct modify_flows_cbdata *cbdata = cbdata_;
2914
2915 if (cbdata->out_port != htons(OFPP_NONE)
2916 && !rule_has_out_port(rule, cbdata->out_port)) {
2917 return;
2918 }
2919
2920 modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions,
2921 cbdata->command, rule);
2922}
2923
2924static int
2925modify_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm,
2926 size_t n_actions, uint16_t command)
2927{
2928 struct modify_flows_cbdata cbdata;
2929 struct cls_rule target;
2930
2931 cbdata.ofproto = p;
2932 cbdata.ofm = ofm;
2933 cbdata.out_port = (command == OFPFC_DELETE ? ofm->out_port
2934 : htons(OFPP_NONE));
2935 cbdata.n_actions = n_actions;
2936 cbdata.command = command;
2937
2938 cls_rule_from_match(&target, &ofm->match, 0);
2939
2940 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2941 modify_flows_cb, &cbdata);
2942 return 0;
2943}
2944
2945static int
2946handle_flow_mod(struct ofproto *p, struct ofconn *ofconn,
2947 struct ofp_flow_mod *ofm)
2948{
2949 size_t n_actions;
2950 int error;
2951
2952 error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm,
2953 sizeof *ofm->actions, &n_actions);
2954 if (error) {
2955 return error;
2956 }
2957
2958 /* We do not support the emergency flow cache. It will hopefully
2959 * get dropped from OpenFlow in the near future. */
2960 if (ofm->flags & htons(OFPFF_EMERG)) {
2961 /* There isn't a good fit for an error code, so just state that the
2962 * flow table is full. */
2963 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
2964 }
2965
2966 normalize_match(&ofm->match);
2967 if (!ofm->match.wildcards) {
2968 ofm->priority = htons(UINT16_MAX);
2969 }
2970
2971 error = validate_actions((const union ofp_action *) ofm->actions,
2972 n_actions, p->max_ports);
2973 if (error) {
2974 return error;
2975 }
2976
2977 switch (ntohs(ofm->command)) {
2978 case OFPFC_ADD:
2979 return add_flow(p, ofconn, ofm, n_actions);
2980
2981 case OFPFC_MODIFY:
2982 return modify_flows_loose(p, ofm, n_actions, OFPFC_MODIFY);
2983
2984 case OFPFC_MODIFY_STRICT:
2985 return modify_flows_strict(p, ofm, n_actions, OFPFC_MODIFY);
2986
2987 case OFPFC_DELETE:
2988 return modify_flows_loose(p, ofm, n_actions, OFPFC_DELETE);
2989
2990 case OFPFC_DELETE_STRICT:
2991 return modify_flows_strict(p, ofm, n_actions, OFPFC_DELETE);
2992
2993 default:
2994 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
2995 }
2996}
2997
2998static int
2999handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
3000{
3001 struct ofp_vendor_header *ovh = msg;
3002 struct nicira_header *nh;
3003
3004 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
49bdc010 3005 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3006 }
3007 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
3008 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3009 }
3010 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
49bdc010 3011 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3012 }
3013
3014 nh = msg;
3015 switch (ntohl(nh->subtype)) {
3016 case NXT_STATUS_REQUEST:
3017 return switch_status_handle_request(p->switch_status, ofconn->rconn,
3018 msg);
3019 }
3020
3021 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3022}
3023
3024static int
3025handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh)
3026{
3027 struct ofp_header *ob;
3028 struct ofpbuf *buf;
3029
3030 /* Currently, everything executes synchronously, so we can just
3031 * immediately send the barrier reply. */
3032 ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
3033 queue_tx(buf, ofconn, ofconn->reply_counter);
3034 return 0;
3035}
3036
3037static void
3038handle_openflow(struct ofconn *ofconn, struct ofproto *p,
3039 struct ofpbuf *ofp_msg)
3040{
3041 struct ofp_header *oh = ofp_msg->data;
3042 int error;
3043
3044 COVERAGE_INC(ofproto_recv_openflow);
3045 switch (oh->type) {
3046 case OFPT_ECHO_REQUEST:
3047 error = handle_echo_request(ofconn, oh);
3048 break;
3049
3050 case OFPT_ECHO_REPLY:
3051 error = 0;
3052 break;
3053
3054 case OFPT_FEATURES_REQUEST:
3055 error = handle_features_request(p, ofconn, oh);
3056 break;
3057
3058 case OFPT_GET_CONFIG_REQUEST:
3059 error = handle_get_config_request(p, ofconn, oh);
3060 break;
3061
3062 case OFPT_SET_CONFIG:
3063 error = handle_set_config(p, ofconn, ofp_msg->data);
3064 break;
3065
3066 case OFPT_PACKET_OUT:
3067 error = handle_packet_out(p, ofconn, ofp_msg->data);
3068 break;
3069
3070 case OFPT_PORT_MOD:
3071 error = handle_port_mod(p, oh);
3072 break;
3073
3074 case OFPT_FLOW_MOD:
3075 error = handle_flow_mod(p, ofconn, ofp_msg->data);
3076 break;
3077
3078 case OFPT_STATS_REQUEST:
3079 error = handle_stats_request(p, ofconn, oh);
3080 break;
3081
3082 case OFPT_VENDOR:
3083 error = handle_vendor(p, ofconn, ofp_msg->data);
3084 break;
3085
3086 case OFPT_BARRIER_REQUEST:
3087 error = handle_barrier_request(ofconn, oh);
3088 break;
3089
3090 default:
3091 if (VLOG_IS_WARN_ENABLED()) {
3092 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
3093 VLOG_DBG_RL(&rl, "OpenFlow message ignored: %s", s);
3094 free(s);
3095 }
3096 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
3097 break;
3098 }
3099
3100 if (error) {
3101 send_error_oh(ofconn, ofp_msg->data, error);
3102 }
3103}
3104\f
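/* Handles a flow-miss upcall from the datapath: the packet matched no
 * installed exact-match flow.  After giving in-band control a chance to
 * handle the packet, the flow is looked up in the OpenFlow classifier; a
 * match installs (or refreshes) a subrule and executes its actions, while a
 * miss results in a packet-in to the controller, subject to
 * OFPPC_NO_PACKET_IN and rate limiting. */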
3105static void
72b06300 3106handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
3107{
3108 struct odp_msg *msg = packet->data;
3109 uint16_t in_port = odp_port_to_ofp_port(msg->port);
3110 struct rule *rule;
3111 struct ofpbuf payload;
3112 flow_t flow;
3113
3114 payload.data = msg + 1;
3115 payload.size = msg->length - sizeof *msg;
3116 flow_extract(&payload, msg->port, &flow);
3117
3118 /* Check with in-band control to see if this packet should be sent
3119 * to the local port regardless of the flow table. */
3120 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
3121 union odp_action action;
3122
3123 memset(&action, 0, sizeof(action));
3124 action.output.type = ODPAT_OUTPUT;
3125 action.output.port = ODPP_LOCAL;
f1acd62b 3126 dpif_execute(p->dpif, flow.in_port, &action, 1, &payload);
3127 }
3128
3129 rule = lookup_valid_rule(p, &flow);
3130 if (!rule) {
3131 /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
3132 struct ofport *port = port_array_get(&p->ports, msg->port);
3133 if (port) {
3134 if (port->opp.config & OFPPC_NO_PACKET_IN) {
3135 COVERAGE_INC(ofproto_no_packet_in);
3136 /* XXX install 'drop' flow entry */
3137 ofpbuf_delete(packet);
3138 return;
3139 }
3140 } else {
3141 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
3142 }
3143
3144 COVERAGE_INC(ofproto_packet_in);
3145 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3146 return;
3147 }
3148
3149 if (rule->cr.wc.wildcards) {
3150 rule = rule_create_subrule(p, rule, &flow);
3151 rule_make_actions(p, rule, packet);
3152 } else {
3153 if (!rule->may_install) {
3154 /* The rule is not installable, that is, we need to process every
3155 * packet, so process the current packet and set its actions into
3156 * 'subrule'. */
3157 rule_make_actions(p, rule, packet);
3158 } else {
3159 /* XXX revalidate rule if it needs it */
3160 }
3161 }
3162
3163 rule_execute(p, rule, &payload, &flow);
3164 rule_reinstall(p, rule);
3165
3166 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY
3167 && rconn_is_connected(p->controller->rconn)) {
3168 /*
3169 * Extra-special case for fail-open mode.
3170 *
3171 * We are in fail-open mode and the packet matched the fail-open rule,
3172 * but we are connected to a controller too. We should send the packet
3173 * up to the controller in the hope that it will try to set up a flow
3174 * and thereby allow us to exit fail-open.
3175 *
3176 * See the top-level comment in fail-open.c for more information.
3177 */
3178 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3179 } else {
3180 ofpbuf_delete(packet);
3181 }
064af421 3182 }
3183
3184static void
3185handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
3186{
3187 struct odp_msg *msg = packet->data;
3188
3189 switch (msg->type) {
3190 case _ODPL_ACTION_NR:
3191 COVERAGE_INC(ofproto_ctlr_action);
3192 pinsched_send(p->action_sched, odp_port_to_ofp_port(msg->port), packet,
3193 send_packet_in_action, p);
3194 break;
3195
3196 case _ODPL_SFLOW_NR:
3197 if (p->sflow) {
3198 ofproto_sflow_received(p->sflow, msg);
3199 }
3200 ofpbuf_delete(packet);
3201 break;
3202
3203 case _ODPL_MISS_NR:
3204 handle_odp_miss_msg(p, packet);
3205 break;
3206
3207 default:
3208 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
3209 msg->type);
3210 break;
3211 }
3212}
3213\f
3214static void
3215revalidate_cb(struct cls_rule *sub_, void *cbdata_)
3216{
3217 struct rule *sub = rule_from_cls_rule(sub_);
3218 struct revalidate_cbdata *cbdata = cbdata_;
3219
3220 if (cbdata->revalidate_all
3221 || (cbdata->revalidate_subrules && sub->super)
3222 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
3223 revalidate_rule(cbdata->ofproto, sub);
3224 }
3225}
3226
3227static bool
3228revalidate_rule(struct ofproto *p, struct rule *rule)
3229{
3230 const flow_t *flow = &rule->cr.flow;
3231
3232 COVERAGE_INC(ofproto_revalidate_rule);
3233 if (rule->super) {
3234 struct rule *super;
3235 super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
3236 if (!super) {
3237 rule_remove(p, rule);
3238 return false;
3239 } else if (super != rule->super) {
3240 COVERAGE_INC(ofproto_revalidate_moved);
3241 list_remove(&rule->list);
3242 list_push_back(&super->list, &rule->list);
3243 rule->super = super;
3244 rule->hard_timeout = super->hard_timeout;
3245 rule->idle_timeout = super->idle_timeout;
3246 rule->created = super->created;
3247 rule->used = 0;
3248 }
3249 }
3250
3251 rule_update_actions(p, rule);
3252 return true;
3253}
3254
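/* Composes an OFPT_FLOW_REMOVED message describing 'rule', using 'now' to
 * compute the flow's duration and 'reason' as the removal reason. */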
3255static struct ofpbuf *
ca069229 3256compose_flow_removed(const struct rule *rule, long long int now, uint8_t reason)
064af421 3257{
ca069229 3258 struct ofp_flow_removed *ofr;
3259 struct ofpbuf *buf;
3260
3261 ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
3262 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofr->match);
3263 ofr->priority = htons(rule->cr.priority);
3264 ofr->reason = reason;
3265 ofr->duration = htonl((now - rule->created) / 1000);
3266 ofr->idle_timeout = htons(rule->idle_timeout);
3267 ofr->packet_count = htonll(rule->packet_count);
3268 ofr->byte_count = htonll(rule->byte_count);
3269
3270 return buf;
3271}
3272
3273static void
3274uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
3275{
3276 assert(rule->installed);
3277 assert(!rule->cr.wc.wildcards);
3278
3279 if (rule->super) {
3280 rule_remove(ofproto, rule);
3281 } else {
3282 rule_uninstall(ofproto, rule);
3283 }
3284}
3285static void
3286send_flow_removed(struct ofproto *p, struct rule *rule,
3287 long long int now, uint8_t reason)
3288{
3289 struct ofconn *ofconn;
3290 struct ofconn *prev;
b9b0ce61 3291 struct ofpbuf *buf = NULL;
3292
 3293 /* We limit the maximum number of queued flow expirations by accounting
3294 * them under the counter for replies. That works because preventing
3295 * OpenFlow requests from being processed also prevents new flows from
3296 * being added (and expiring). (It also prevents processing OpenFlow
3297 * requests that would not add new flows, so it is imperfect.) */
3298
3299 prev = NULL;
3300 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
ca069229 3301 if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)) {
064af421 3302 if (prev) {
431d8ad2 3303 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
064af421 3304 } else {
ca069229 3305 buf = compose_flow_removed(rule, now, reason);
3306 }
3307 prev = ofconn;
3308 }
3309 }
3310 if (prev) {
431d8ad2 3311 queue_tx(buf, prev, prev->reply_counter);
3312 }
 3313 }
3314
3315
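/* Expiration processing for a single 'rule'.  If neither the hard nor the
 * idle timeout has been reached, an installed flow that has been idle for
 * five seconds is still uninstalled from the datapath (or an active NetFlow
 * timeout is handled for an exact-match flow).  Otherwise the rule's
 * subrules are removed or its datapath flow uninstalled, a flow-removed
 * message is sent unless the rule is hidden, and the rule itself is
 * removed. */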
3316static void
3317expire_rule(struct cls_rule *cls_rule, void *p_)
3318{
3319 struct ofproto *p = p_;
3320 struct rule *rule = rule_from_cls_rule(cls_rule);
3321 long long int hard_expire, idle_expire, expire, now;
3322
3323 hard_expire = (rule->hard_timeout
3324 ? rule->created + rule->hard_timeout * 1000
3325 : LLONG_MAX);
3326 idle_expire = (rule->idle_timeout
3327 && (rule->super || list_is_empty(&rule->list))
3328 ? rule->used + rule->idle_timeout * 1000
3329 : LLONG_MAX);
3330 expire = MIN(hard_expire, idle_expire);
3331
3332 now = time_msec();
3333 if (now < expire) {
3334 if (rule->installed && now >= rule->used + 5000) {
3335 uninstall_idle_flow(p, rule);
3336 } else if (!rule->cr.wc.wildcards) {
3337 active_timeout(p, rule);
064af421 3338 }
0193b2af 3339
3340 return;
3341 }
3342
3343 COVERAGE_INC(ofproto_expired);
3344
3345 /* Update stats. This code will be a no-op if the rule expired
3346 * due to an idle timeout. */
064af421 3347 if (rule->cr.wc.wildcards) {
3348 struct rule *subrule, *next;
3349 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
3350 rule_remove(p, subrule);
3351 }
3352 } else {
3353 rule_uninstall(p, rule);
3354 }
3355
8fe1a59d 3356 if (!rule_is_hidden(rule)) {
3357 send_flow_removed(p, rule, now,
3358 (now >= hard_expire
3359 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
8fe1a59d 3360 }
3361 rule_remove(p, rule);
3362}
3363
3364static void
3365active_timeout(struct ofproto *ofproto, struct rule *rule)
3366{
3367 if (ofproto->netflow && !is_controller_rule(rule) &&
3368 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
3369 struct ofexpired expired;
3370 struct odp_flow odp_flow;
3371
3372 /* Get updated flow stats. */
3373 memset(&odp_flow, 0, sizeof odp_flow);
3374 if (rule->installed) {
3375 odp_flow.key = rule->cr.flow;
3376 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
d65349ea 3377 dpif_flow_get(ofproto->dpif, &odp_flow);
3378
3379 if (odp_flow.stats.n_packets) {
3380 update_time(ofproto, rule, &odp_flow.stats);
3381 netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
3382 odp_flow.stats.tcp_flags);
3383 }
0193b2af
JG
3384 }
3385
3386 expired.flow = rule->cr.flow;
3387 expired.packet_count = rule->packet_count +
3388 odp_flow.stats.n_packets;
3389 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
3390 expired.used = rule->used;
3391
3392 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
3393
3394 /* Schedule us to send the accumulated records once we have
3395 * collected all of them. */
3396 poll_immediate_wake();
3397 }
3398}
3399
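/* Polls the datapath for all of its flows and uses the returned statistics
 * to refresh the 'used' times and byte accounting of the corresponding
 * rules.  Datapath flows that no longer correspond to an installed rule are
 * deleted. */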
3400static void
3401update_used(struct ofproto *p)
3402{
3403 struct odp_flow *flows;
3404 size_t n_flows;
3405 size_t i;
3406 int error;
3407
c228a364 3408 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
3409 if (error) {
3410 return;
3411 }
3412
3413 for (i = 0; i < n_flows; i++) {
3414 struct odp_flow *f = &flows[i];
3415 struct rule *rule;
3416
3417 rule = rule_from_cls_rule(
3418 classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
3419 if (!rule || !rule->installed) {
3420 COVERAGE_INC(ofproto_unexpected_rule);
c228a364 3421 dpif_flow_del(p->dpif, f);
3422 continue;
3423 }
3424
0193b2af 3425 update_time(p, rule, &f->stats);
3426 rule_account(p, rule, f->stats.n_bytes);
3427 }
3428 free(flows);
3429}
3430
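/* Wraps the datapath message in 'packet' in an OFPT_PACKET_IN (with reason
 * OFPR_ACTION or OFPR_NO_MATCH, depending on the datapath message type) and
 * sends it on 'ofconn', limited to 100 queued packet-ins per connection. */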
3431static void
3432do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id,
3433 const struct ofpbuf *packet, int send_len)
3434{
3435 struct odp_msg *msg = packet->data;
3436 struct ofpbuf payload;
3437 struct ofpbuf *opi;
3438 uint8_t reason;
064af421 3439
372179d4 3440 /* Extract packet payload from 'msg'. */
3441 payload.data = msg + 1;
3442 payload.size = msg->length - sizeof *msg;
3443
3444 /* Construct ofp_packet_in message. */
3445 reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
3446 opi = make_packet_in(buffer_id, odp_port_to_ofp_port(msg->port), reason,
3447 &payload, send_len);
3448
3449 /* Send. */
3450 rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100);
3451}
3452
3453static void
3454send_packet_in_action(struct ofpbuf *packet, void *p_)
3455{
3456 struct ofproto *p = p_;
3457 struct ofconn *ofconn;
3458 struct odp_msg *msg;
3459
3460 msg = packet->data;
3461 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3462 if (ofconn == p->controller || ofconn->miss_send_len) {
3463 do_send_packet_in(ofconn, UINT32_MAX, packet, msg->arg);
3464 }
3465 }
3466 ofpbuf_delete(packet);
3467}
3468
3469static void
3470send_packet_in_miss(struct ofpbuf *packet, void *p_)
3471{
3472 struct ofproto *p = p_;
7778bd15 3473 bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open);
3474 struct ofconn *ofconn;
3475 struct ofpbuf payload;
3476 struct odp_msg *msg;
3477
3478 msg = packet->data;
3479 payload.data = msg + 1;
3480 payload.size = msg->length - sizeof *msg;
3481 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3482 if (ofconn->miss_send_len) {
3483 struct pktbuf *pb = ofconn->pktbuf;
3484 uint32_t buffer_id = (in_fail_open
3485 ? pktbuf_get_null()
3486 : pktbuf_save(pb, &payload, msg->port));
3487 int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len
3488 : UINT32_MAX);
3489 do_send_packet_in(ofconn, buffer_id, packet, send_len);
3490 }
3491 }
3492 ofpbuf_delete(packet);
3493}
3494
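/* Chooses the datapath ID: the Ethernet address of the local port if it can
 * be read, otherwise the randomly generated fallback ID. */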
3495static uint64_t
fa60c019 3496pick_datapath_id(const struct ofproto *ofproto)
064af421 3497{
fa60c019 3498 const struct ofport *port;
064af421 3499
3500 port = port_array_get(&ofproto->ports, ODPP_LOCAL);
3501 if (port) {
3502 uint8_t ea[ETH_ADDR_LEN];
3503 int error;
3504
3505 error = netdev_get_etheraddr(port->netdev, ea);
3506 if (!error) {
3507 return eth_addr_to_uint64(ea);
3508 }
3509 VLOG_WARN("could not get MAC address for %s (%s)",
fa60c019 3510 netdev_get_name(port->netdev), strerror(error));
064af421 3511 }
fa60c019 3512 return ofproto->fallback_dpid;
3513}
3514
3515static uint64_t
3516pick_fallback_dpid(void)
3517{
3518 uint8_t ea[ETH_ADDR_LEN];
70150daf 3519 eth_addr_nicira_random(ea);
3520 return eth_addr_to_uint64(ea);
3521}
3522\f
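/* Default "normal" switching behavior: drop frames destined to reserved
 * multicast addresses, learn the source MAC, and output to the port learned
 * for the destination MAC, flooding when the destination is unknown. */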
3523static bool
3524default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
3525 struct odp_actions *actions, tag_type *tags,
6a07af36 3526 uint16_t *nf_output_iface, void *ofproto_)
3527{
3528 struct ofproto *ofproto = ofproto_;
3529 int out_port;
3530
3531 /* Drop frames for reserved multicast addresses. */
3532 if (eth_addr_is_reserved(flow->dl_dst)) {
3533 return true;
3534 }
3535
3536 /* Learn source MAC (but don't try to learn from revalidation). */
3537 if (packet != NULL) {
3538 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
3539 0, flow->in_port);
3540 if (rev_tag) {
3541 /* The log messages here could actually be useful in debugging,
3542 * so keep the rate limit relatively high. */
3543 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
3544 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
3545 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
3546 ofproto_revalidate(ofproto, rev_tag);
3547 }
3548 }
3549
3550 /* Determine output port. */
3551 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
3552 if (out_port < 0) {
6a07af36 3553 add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
3554 } else if (out_port != flow->in_port) {
3555 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
6a07af36 3556 *nf_output_iface = out_port;
3557 } else {
3558 /* Drop. */
3559 }
3560
3561 return true;
3562}
3563
3564static const struct ofhooks default_ofhooks = {
3565 NULL,
3566 default_normal_ofhook_cb,
3567 NULL,
3568 NULL
3569};