/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "ofproto.h"
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdlib.h>
#include "classifier.h"
#include "coverage.h"
#include "discovery.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "executer.h"
#include "fail-open.h"
#include "in-band.h"
#include "mac-learning.h"
#include "netdev.h"
#include "netflow.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openflow/openflow-mgmt.h"
#include "openvswitch/datapath-protocol.h"
#include "packets.h"
#include "pinsched.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "port-array.h"
#include "rconn.h"
#include "shash.h"
#include "status.h"
#include "stp.h"
#include "svec.h"
#include "tag.h"
#include "timeval.h"
#include "unixctl.h"
#include "vconn.h"
#include "vconn-ssl.h"
#include "xtoxll.h"

#define THIS_MODULE VLM_ofproto
#include "vlog.h"

#include "sflow_api.h"

enum {
    TABLEID_HASH = 0,
    TABLEID_CLASSIFIER = 1
};

struct ofport {
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const flow_t *flow, struct ofproto *ofproto,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);

struct rule {
    struct cls_rule cr;

    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    long long int used;         /* Last-used time (0 if never used). */
    long long int created;      /* Creation time. */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */
    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
    tag_type tags;              /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     * super-rule's list.
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct rule *super;
    struct list list;

    /* OpenFlow actions.
     *
     * A subrule has no actions (it uses the super-rule's actions). */
    int n_actions;
    union ofp_action *actions;

    /* Datapath actions.
     *
     * A super-rule with wildcard fields never has ODP actions (since the
     * datapath only supports exact-match flows). */
    bool installed;             /* Installed in datapath? */
    bool may_install;           /* True ordinarily; false if actions must
                                 * be reassessed for every packet. */
    int n_odp_actions;
    union odp_action *odp_actions;
};

static inline bool
rule_is_hidden(const struct rule *rule)
{
    /* Subrules are merely an implementation detail, so hide them from the
     * controller. */
    if (rule->super != NULL) {
        return true;
    }

    /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
     * (e.g. by in-band control) and are intentionally hidden from the
     * controller. */
    if (rule->cr.priority > UINT16_MAX) {
        return true;
    }

    return false;
}

static struct rule *rule_create(struct ofproto *, struct rule *super,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void rule_insert(struct ofproto *, struct rule *,
                        struct ofpbuf *packet, uint16_t in_port);
static void rule_remove(struct ofproto *, struct rule *);
static bool rule_make_actions(struct ofproto *, struct rule *,
                              const struct ofpbuf *packet);
static void rule_install(struct ofproto *, struct rule *,
                         struct rule *displaced_rule);
static void rule_uninstall(struct ofproto *, struct rule *);
static void rule_post_uninstall(struct ofproto *, struct rule *);

struct ofconn {
    struct list node;
    struct rconn *rconn;
    struct pktbuf *pktbuf;
    bool send_flow_exp;
    int miss_send_len;

    struct rconn_packet_counter *packet_in_counter;

    /* Number of OpenFlow messages queued as replies to OpenFlow requests, and
     * the maximum number before we stop reading OpenFlow requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;
};

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *);
static void ofconn_destroy(struct ofconn *, struct ofproto *);
static void ofconn_run(struct ofconn *, struct ofproto *);
static void ofconn_wait(struct ofconn *);
static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    uint64_t mgmt_id;           /* Management channel identifier. */
    char *manufacturer;         /* Manufacturer. */
    char *hardware;             /* Hardware. */
    char *software;             /* Software version. */
    char *serial;               /* Serial number. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct port_array ports;    /* Index is ODP port nr; ofport->opp.port_no is
                                 * OFP port nr. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct status_category *ss_cat;
    struct in_band *in_band;
    struct discovery *discovery;
    struct fail_open *fail_open;
    struct pinsched *miss_sched, *action_sched;
    struct executer *executer;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* Flow table. */
    struct classifier cls;
    bool need_revalidate;
    long long int next_expiration;
    struct tag_set revalidate_set;

    /* OpenFlow connections. */
    struct list all_conns;
    struct ofconn *controller;
    struct pvconn **listeners;
    size_t n_listeners;
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);
static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
static void send_packet_in_action(struct ofpbuf *, void *ofproto);
static void update_used(struct ofproto *);
static void update_stats(struct ofproto *, struct rule *,
                         const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofproto *,
                            struct ofpbuf *);

static void refresh_port_groups(struct ofproto *);

static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

int
ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xcalloc(1, sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->manufacturer = xstrdup("Nicira Networks, Inc.");
    p->hardware = xstrdup("Reference Implementation");
    p->software = xstrdup(VERSION BUILDNR);
    p->serial = xstrdup("None");

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    port_array_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);
    p->in_band = NULL;
    p->discovery = NULL;
    p->fail_open = NULL;
    p->miss_sched = p->action_sched = NULL;
    p->executer = NULL;
    p->netflow = NULL;
    p->sflow = NULL;

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->need_revalidate = false;
    p->next_expiration = time_msec() + 1000;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    p->controller = ofconn_create(p, rconn_create(5, 8));
    p->controller->pktbuf = pktbuf_create();
    p->controller->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
    p->listeners = NULL;
    p->n_listeners = 0;
    p->snoops = NULL;
    p->n_snoops = 0;

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Register switch status category. */
    p->ss_cat = switch_status_register(p->switch_status, "remote",
                                       rconn_status_cb, p->controller->rconn);

    /* Almost done... */
    error = init_ports(p);
    if (error) {
        ofproto_destroy(p);
        return error;
    }

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %012"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}

void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        VLOG_INFO("datapath ID changed to %012"PRIx64, p->datapath_id);
        rconn_reconnect(p->controller->rconn);
    }
}

void
ofproto_set_mgmt_id(struct ofproto *p, uint64_t mgmt_id)
{
    p->mgmt_id = mgmt_id;
}

void
ofproto_set_probe_interval(struct ofproto *p, int probe_interval)
{
    probe_interval = probe_interval ? MAX(probe_interval, 5) : 0;
    rconn_set_probe_interval(p->controller->rconn, probe_interval);
    if (p->fail_open) {
        int trigger_duration = probe_interval ? probe_interval * 3 : 15;
        fail_open_set_trigger_duration(p->fail_open, trigger_duration);
    }
}

void
ofproto_set_max_backoff(struct ofproto *p, int max_backoff)
{
    rconn_set_max_backoff(p->controller->rconn, max_backoff);
}

void
ofproto_set_desc(struct ofproto *p,
                 const char *manufacturer, const char *hardware,
                 const char *software, const char *serial)
{
    if (manufacturer) {
        free(p->manufacturer);
        p->manufacturer = xstrdup(manufacturer);
    }
    if (hardware) {
        free(p->hardware);
        p->hardware = xstrdup(hardware);
    }
    if (software) {
        free(p->software);
        p->software = xstrdup(software);
    }
    if (serial) {
        free(p->serial);
        p->serial = xstrdup(serial);
    }
}

int
ofproto_set_in_band(struct ofproto *p, bool in_band)
{
    if (in_band != (p->in_band != NULL)) {
        if (in_band) {
            return in_band_create(p, p->dpif, p->switch_status,
                                  p->controller->rconn, &p->in_band);
        } else {
            ofproto_set_discovery(p, false, NULL, true);
            in_band_destroy(p->in_band);
            p->in_band = NULL;
        }
        rconn_reconnect(p->controller->rconn);
    }
    return 0;
}

int
ofproto_set_discovery(struct ofproto *p, bool discovery,
                      const char *re, bool update_resolv_conf)
{
    if (discovery != (p->discovery != NULL)) {
        if (discovery) {
            int error = ofproto_set_in_band(p, true);
            if (error) {
                return error;
            }
            error = discovery_create(re, update_resolv_conf,
                                     p->dpif, p->switch_status,
                                     &p->discovery);
            if (error) {
                return error;
            }
        } else {
            discovery_destroy(p->discovery);
            p->discovery = NULL;
        }
        rconn_disconnect(p->controller->rconn);
    } else if (discovery) {
        discovery_set_update_resolv_conf(p->discovery, update_resolv_conf);
        return discovery_set_accept_controller_re(p->discovery, re);
    }
    return 0;
}

int
ofproto_set_controller(struct ofproto *ofproto, const char *controller)
{
    if (ofproto->discovery) {
        return EINVAL;
    } else if (controller) {
        if (strcmp(rconn_get_name(ofproto->controller->rconn), controller)) {
            return rconn_connect(ofproto->controller->rconn, controller);
        } else {
            return 0;
        }
    } else {
        rconn_disconnect(ofproto->controller->rconn);
        return 0;
    }
}

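/* Closes the pvconns in '*pvconnsp', then opens a new pvconn for each name in
 * 'svec' and stores the new array and count back into '*pvconnsp' and
 * '*n_pvconnsp'.  Returns 0 if every name could be opened for listening,
 * otherwise the first error encountered (the remaining names are still
 * attempted). */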
static int
set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
            const struct svec *svec)
{
    struct pvconn **pvconns = *pvconnsp;
    size_t n_pvconns = *n_pvconnsp;
    int retval = 0;
    size_t i;

    for (i = 0; i < n_pvconns; i++) {
        pvconn_close(pvconns[i]);
    }
    free(pvconns);

    pvconns = xmalloc(svec->n * sizeof *pvconns);
    n_pvconns = 0;
    for (i = 0; i < svec->n; i++) {
        const char *name = svec->names[i];
        struct pvconn *pvconn;
        int error;

        error = pvconn_open(name, &pvconn);
        if (!error) {
            pvconns[n_pvconns++] = pvconn;
        } else {
            VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
            if (!retval) {
                retval = error;
            }
        }
    }

    *pvconnsp = pvconns;
    *n_pvconnsp = n_pvconns;

    return retval;
}

int
ofproto_set_listeners(struct ofproto *ofproto, const struct svec *listeners)
{
    return set_pvconns(&ofproto->listeners, &ofproto->n_listeners, listeners);
}

int
ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
{
    return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
}

int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;
            unsigned int odp_port;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            refresh_port_groups(ofproto);
            PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
                ofproto_sflow_add_port(os, odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

void
ofproto_set_failure(struct ofproto *ofproto, bool fail_open)
{
    if (fail_open) {
        struct rconn *rconn = ofproto->controller->rconn;
        int trigger_duration = rconn_get_probe_interval(rconn) * 3;
        if (!ofproto->fail_open) {
            ofproto->fail_open = fail_open_create(ofproto, trigger_duration,
                                                  ofproto->switch_status,
                                                  rconn);
        } else {
            fail_open_set_trigger_duration(ofproto->fail_open,
                                           trigger_duration);
        }
    } else {
        fail_open_destroy(ofproto->fail_open);
        ofproto->fail_open = NULL;
    }
}

void
ofproto_set_rate_limit(struct ofproto *ofproto,
                       int rate_limit, int burst_limit)
{
    if (rate_limit > 0) {
        if (!ofproto->miss_sched) {
            ofproto->miss_sched = pinsched_create(rate_limit, burst_limit,
                                                  ofproto->switch_status);
            ofproto->action_sched = pinsched_create(rate_limit, burst_limit,
                                                    NULL);
        } else {
            pinsched_set_limits(ofproto->miss_sched, rate_limit, burst_limit);
            pinsched_set_limits(ofproto->action_sched,
                                rate_limit, burst_limit);
        }
    } else {
        pinsched_destroy(ofproto->miss_sched);
        ofproto->miss_sched = NULL;
        pinsched_destroy(ofproto->action_sched);
        ofproto->action_sched = NULL;
    }
}

int
ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
    /* XXX */
    if (enable_stp) {
        VLOG_WARN("STP is not yet implemented");
        return EINVAL;
    } else {
        return 0;
    }
}

int
ofproto_set_remote_execution(struct ofproto *ofproto, const char *command_acl,
                             const char *command_dir)
{
    if (command_acl) {
        if (!ofproto->executer) {
            return executer_create(command_acl, command_dir,
                                   &ofproto->executer);
        } else {
            executer_set_acl(ofproto->executer, command_acl, command_dir);
        }
    } else {
        executer_destroy(ofproto->executer);
        ofproto->executer = NULL;
    }
    return 0;
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

uint64_t
ofproto_get_mgmt_id(const struct ofproto *ofproto)
{
    return ofproto->mgmt_id;
}

int
ofproto_get_probe_interval(const struct ofproto *ofproto)
{
    return rconn_get_probe_interval(ofproto->controller->rconn);
}

int
ofproto_get_max_backoff(const struct ofproto *ofproto)
{
    return rconn_get_max_backoff(ofproto->controller->rconn);
}

bool
ofproto_get_in_band(const struct ofproto *ofproto)
{
    return ofproto->in_band != NULL;
}

bool
ofproto_get_discovery(const struct ofproto *ofproto)
{
    return ofproto->discovery != NULL;
}

const char *
ofproto_get_controller(const struct ofproto *ofproto)
{
    return rconn_get_name(ofproto->controller->rconn);
}

void
ofproto_get_listeners(const struct ofproto *ofproto, struct svec *listeners)
{
    size_t i;

    for (i = 0; i < ofproto->n_listeners; i++) {
        svec_add(listeners, pvconn_get_name(ofproto->listeners[i]));
    }
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

void
ofproto_destroy(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport;
    unsigned int port_no;
    size_t i;

    if (!p) {
        return;
    }

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_destroy(ofconn, p);
    }

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    in_band_destroy(p->in_band);
    discovery_destroy(p->discovery);
    fail_open_destroy(p->fail_open);
    pinsched_destroy(p->miss_sched);
    pinsched_destroy(p->action_sched);
    executer_destroy(p->executer);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    switch_status_unregister(p->ss_cat);

    for (i = 0; i < p->n_listeners; i++) {
        pvconn_close(p->listeners[i]);
    }
    free(p->listeners);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    char *devname;
    int error;
    int i;

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;
        int error;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        in_band_run(p->in_band);
    }
    if (p->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(p->controller->rconn)) {
            discovery_question_connectivity(p->discovery);
        }
        if (discovery_run(p->discovery, &controller_name)) {
            if (controller_name) {
                rconn_connect(p->controller->rconn, controller_name);
            } else {
                rconn_disconnect(p->controller->rconn);
            }
        }
    }
    pinsched_run(p->miss_sched, send_packet_in_miss, p);
    pinsched_run(p->action_sched, send_packet_in_action, p);
    if (p->executer) {
        executer_run(p->executer);
    }

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_run(ofconn, p);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    for (i = 0; i < p->n_listeners; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn);
        if (!retval) {
            ofconn_create(p, rconn_new_from_vconn("passive", vconn));
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            rconn_add_monitor(p->controller->rconn, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        COVERAGE_INC(ofproto_expiration);
        p->next_expiration = time_msec() + 1000;
        update_used(p);

        classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);

        /* Let the hook know that we're at a stable point: all outstanding data
         * in existing flows has been accounted to the account_cb.  Thus, the
         * hook can now reasonably do operations that depend on having accurate
         * flow volume accounting (currently, that's just bond rebalancing). */
        if (p->ofhooks->account_checkpoint_cb) {
            p->ofhooks->account_checkpoint_cb(p->aux);
        }
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

struct revalidate_cbdata {
    struct ofproto *ofproto;
    bool revalidate_all;        /* Revalidate all exact-match rules? */
    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */
};

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    if (p->need_revalidate || revalidate_all
        || !tag_set_is_empty(&p->revalidate_set)) {
        struct revalidate_cbdata cbdata;
        cbdata.ofproto = p;
        cbdata.revalidate_all = revalidate_all;
        cbdata.revalidate_subrules = p->need_revalidate;
        cbdata.revalidate_set = p->revalidate_set;
        tag_set_init(&p->revalidate_set);
        COVERAGE_INC(ofproto_revalidate);
        classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        p->need_revalidate = false;
    }

    return 0;
}

void
ofproto_wait(struct ofproto *p)
{
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        in_band_wait(p->in_band);
    }
    if (p->discovery) {
        discovery_wait(p->discovery);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    pinsched_wait(p->miss_sched);
    pinsched_wait(p->action_sched);
    if (p->executer) {
        executer_wait(p->executer);
    }
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait(p->next_expiration - time_msec());
    }
    for (i = 0; i < p->n_listeners; i++) {
        pvconn_wait(p->listeners[i]);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return p->discovery || rconn_is_alive(p->controller->rconn);
}

int
ofproto_send_packet(struct ofproto *p, const flow_t *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, flow->in_port, odp_actions.actions,
                 odp_actions.n_actions, packet);
    return 0;
}

void
ofproto_add_flow(struct ofproto *p,
                 const flow_t *flow, uint32_t wildcards, unsigned int priority,
                 const union ofp_action *actions, size_t n_actions,
                 int idle_timeout)
{
    struct rule *rule;
    rule = rule_create(p, NULL, actions, n_actions,
                       idle_timeout >= 0 ? idle_timeout : 5 /* XXX */, 0);
    cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
    rule_insert(p, rule, NULL, 0);
}

void
ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
                    uint32_t wildcards, unsigned int priority)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           flow, wildcards,
                                                           priority));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    /* Mark the flow as not installed, even though it might really be
     * installed, so that rule_remove() doesn't bother trying to uninstall it.
     * There is no point in uninstalling it individually since we are about to
     * blow away all the flows with dpif_flow_flush(). */
    rule->installed = false;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}

static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    unsigned int port_no;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    svec_init(&devnames);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        svec_add (&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add (&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

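/* Sets datapath port group 'group' (which must be DP_GROUP_ALL or
 * DP_GROUP_FLOOD) to contain every port, or every port not configured with
 * OFPPC_NO_FLOOD, respectively.  Returns the number of ports placed in the
 * group. */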
static size_t
refresh_port_group(struct ofproto *p, unsigned int group)
{
    uint16_t *ports;
    size_t n_ports;
    struct ofport *port;
    unsigned int port_no;

    assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);

    ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
    n_ports = 0;
    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
            ports[n_ports++] = port_no;
        }
    }
    dpif_port_group_set(p->dpif, group, ports, n_ports);
    free(ports);

    return n_ports;
}

static void
refresh_port_groups(struct ofproto *p)
{
    size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
    size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
    if (p->sflow) {
        ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
    }
}

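/* Opens the netdev for 'odp_port' and returns a newly allocated ofport
 * initialized from it, or a null pointer if the netdev cannot be opened. */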
static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    bool carrier;
    int error;

    error = netdev_open(odp_port->devname, NETDEV_ETH_TYPE_NONE, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (port_array_get(&p->ports, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static int
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
    if (p->ofhooks->port_changed_cb) {
        p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, ofport);
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);

    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, NULL);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

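/* Refreshes ofproto's view of the port named 'devname': queries the datapath
 * for the port, compares the result against any existing ofport, and sends
 * the appropriate OFPPR_ADD, OFPPR_DELETE, or OFPPR_MODIFY port status
 * message before refreshing the port groups. */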
static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_ofport = port_array_get(&p->ports, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);

    /* Update port groups. */
    refresh_port_groups(p);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    refresh_port_groups(p);
    return 0;
}

static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn)
{
    struct ofconn *ofconn = xmalloc(sizeof *ofconn);
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->pktbuf = NULL;
    ofconn->send_flow_exp = false;
    ofconn->miss_send_len = 0;
    ofconn->packet_in_counter = rconn_packet_counter_create ();
    ofconn->reply_counter = rconn_packet_counter_create ();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn, struct ofproto *p)
{
    if (p->executer) {
        executer_rconn_closing(p->executer, ofconn->rconn);
    }

    list_remove(&ofconn->node);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn, struct ofproto *p)
{
    int iteration;

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, p, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (ofconn != p->controller && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn, p);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}

/* Caller is responsible for initializing the 'cr' member of the returned
 * rule. */
static struct rule *
rule_create(struct ofproto *ofproto, struct rule *super,
            const union ofp_action *actions, size_t n_actions,
            uint16_t idle_timeout, uint16_t hard_timeout)
{
    struct rule *rule = xcalloc(1, sizeof *rule);
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    rule->used = rule->created = time_msec();
    rule->super = super;
    if (super) {
        list_push_back(&super->list, &rule->list);
    } else {
        list_init(&rule->list);
    }
    rule->n_actions = n_actions;
    rule->actions = xmemdup(actions, n_actions * sizeof *actions);
    netflow_flow_clear(&rule->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);

    return rule;
}

static struct rule *
rule_from_cls_rule(const struct cls_rule *cls_rule)
{
    return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
}

static void
rule_free(struct rule *rule)
{
    free(rule->actions);
    free(rule->odp_actions);
    free(rule);
}

/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer has a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from the
 * classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
static void
rule_destroy(struct ofproto *ofproto, struct rule *rule)
{
    if (!rule->super) {
        struct rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
            revalidate_rule(ofproto, subrule);
        }
    } else {
        list_remove(&rule->list);
    }
    rule_free(rule);
}

static bool
rule_has_out_port(const struct rule *rule, uint16_t out_port)
{
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {
        return true;
    }
    for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
         oa = actions_next(&i)) {
        if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
            return true;
        }
    }
    return false;
}

/* Executes the actions indicated by 'rule' on 'packet', which is in flow
 * 'flow' and is considered to have arrived on ODP port 'in_port'.
 *
 * The flow that 'packet' actually contains does not need to actually match
 * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
 * the packet and byte counters for 'rule' will be credited for the packet sent
 * out whether or not the packet actually matches 'rule'.
 *
 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
 * the caller must already have accurately composed ODP actions for it given
 * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
 * function will compose a set of ODP actions based on 'rule''s OpenFlow
 * actions and apply them to 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule,
             struct ofpbuf *packet, const flow_t *flow)
{
    const union odp_action *actions;
    size_t n_actions;
    struct odp_actions a;

    /* Grab or compose the ODP actions.
     *
     * The special case for an exact-match 'rule' where 'flow' is not the
     * rule's flow is important to avoid, e.g., sending a packet out its input
     * port simply because the ODP actions were composed for the wrong
     * scenario. */
    if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
        struct rule *super = rule->super ? rule->super : rule;
        if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
                          packet, &a, NULL, 0, NULL)) {
            return;
        }
        actions = a.actions;
        n_actions = a.n_actions;
    } else {
        actions = rule->odp_actions;
        n_actions = rule->n_odp_actions;
    }

    /* Execute the ODP actions. */
    if (!dpif_execute(ofproto->dpif, flow->in_port,
                      actions, n_actions, packet)) {
        struct odp_flow_stats stats;
        flow_extract_stats(flow, packet, &stats);
        update_stats(ofproto, rule, &stats);
        rule->used = time_msec();
        netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
    }
}

static void
rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
            uint16_t in_port)
{
    struct rule *displaced_rule;

    /* Insert the rule in the classifier. */
    displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
    if (!rule->cr.wc.wildcards) {
        rule_make_actions(p, rule, packet);
    }

    /* Send the packet and credit it to the rule. */
    if (packet) {
        flow_t flow;
        flow_extract(packet, in_port, &flow);
        rule_execute(p, rule, packet, &flow);
    }

    /* Install the rule in the datapath only after sending the packet, to
     * avoid packet reordering. */
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_add_wc_flow);
        p->need_revalidate = true;
    } else {
        rule_install(p, rule, displaced_rule);
    }

    /* Free the rule that was displaced, if any. */
    if (displaced_rule) {
        rule_destroy(p, displaced_rule);
    }
}

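/* Creates an exact-match subrule of the wildcarded 'rule' for 'flow',
 * inheriting the parent rule's timeouts, and inserts it into ofproto's
 * classifier at a priority that is at least UINT16_MAX and never lower than
 * the parent's. */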
static struct rule *
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
                    const flow_t *flow)
{
    struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
                                       rule->idle_timeout, rule->hard_timeout);
    COVERAGE_INC(ofproto_subrule_create);
    cls_rule_from_flow(&subrule->cr, flow, 0,
                       (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
                        : rule->cr.priority));
    classifier_insert_exact(&ofproto->cls, &subrule->cr);

    return subrule;
}

static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->cr.wc.wildcards) {
        COVERAGE_INC(ofproto_del_wc_flow);
        ofproto->need_revalidate = true;
    } else {
        rule_uninstall(ofproto, rule);
    }
    classifier_remove(&ofproto->cls, &rule->cr);
    rule_destroy(ofproto, rule);
}

/* Returns true if the actions changed, false otherwise. */
static bool
rule_make_actions(struct ofproto *p, struct rule *rule,
                  const struct ofpbuf *packet)
{
    const struct rule *super;
    struct odp_actions a;
    size_t actions_len;

    assert(!rule->cr.wc.wildcards);

    super = rule->super ? rule->super : rule;
    rule->tags = 0;
    xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
                  packet, &a, &rule->tags, &rule->may_install,
                  &rule->nf_flow.output_iface);

    actions_len = a.n_actions * sizeof *a.actions;
    if (rule->n_odp_actions != a.n_actions
        || memcmp(rule->odp_actions, a.actions, actions_len)) {
        COVERAGE_INC(ofproto_odp_unchanged);
        free(rule->odp_actions);
        rule->n_odp_actions = a.n_actions;
        rule->odp_actions = xmemdup(a.actions, actions_len);
        return true;
    } else {
        return false;
    }
}

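/* Fills in 'put' with 'rule''s flow key and ODP actions plus the given
 * ODPPF_* 'flags', then performs the flow table operation on ofproto's
 * datapath and returns the dpif_flow_put() result. */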
static int
do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
            struct odp_flow_put *put)
{
    memset(&put->flow.stats, 0, sizeof put->flow.stats);
    put->flow.key = rule->cr.flow;
    put->flow.actions = rule->odp_actions;
    put->flow.n_actions = rule->n_odp_actions;
    put->flags = flags;
    return dpif_flow_put(ofproto->dpif, put);
}

static void
rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
{
    assert(!rule->cr.wc.wildcards);

    if (rule->may_install) {
        struct odp_flow_put put;
        if (!do_put_flow(p, rule,
                         ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
                         &put)) {
            rule->installed = true;
            if (displaced_rule) {
                update_stats(p, displaced_rule, &put.flow.stats);
                rule_post_uninstall(p, displaced_rule);
            }
        }
    } else if (displaced_rule) {
        rule_uninstall(p, displaced_rule);
    }
}

static void
rule_reinstall(struct ofproto *ofproto, struct rule *rule)
{
    if (rule->installed) {
        struct odp_flow_put put;
        COVERAGE_INC(ofproto_dp_missed);
        do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
    } else {
        rule_install(ofproto, rule, NULL);
    }
}

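/* Recomputes 'rule''s ODP actions and brings the datapath up to date:
 * reinstalls the flow if the actions changed, installs it if it is not yet
 * installed, or uninstalls it if the actions may no longer be installed. */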
static void
rule_update_actions(struct ofproto *ofproto, struct rule *rule)
{
    bool actions_changed;
    uint16_t new_out_iface, old_out_iface;

    old_out_iface = rule->nf_flow.output_iface;
    actions_changed = rule_make_actions(ofproto, rule, NULL);

    if (rule->may_install) {
        if (rule->installed) {
            if (actions_changed) {
                struct odp_flow_put put;
                do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
                            | ODPPF_ZERO_STATS, &put);
                update_stats(ofproto, rule, &put.flow.stats);

                /* Temporarily set the old output iface so that NetFlow
                 * messages have the correct output interface for the old
                 * stats. */
                new_out_iface = rule->nf_flow.output_iface;
                rule->nf_flow.output_iface = old_out_iface;
                rule_post_uninstall(ofproto, rule);
                rule->nf_flow.output_iface = new_out_iface;
            }
        } else {
            rule_install(ofproto, rule, NULL);
        }
    } else {
        rule_uninstall(ofproto, rule);
    }
}

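/* Passes the bytes not yet reported for 'rule', plus 'extra_bytes', to the
 * ovs-vswitchd account_flow_cb hook (if one is registered) and records how
 * many bytes have now been accounted. */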
static void
rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
{
    uint64_t total_bytes = rule->byte_count + extra_bytes;

    if (ofproto->ofhooks->account_flow_cb
        && total_bytes > rule->accounted_bytes)
    {
        ofproto->ofhooks->account_flow_cb(
            &rule->cr.flow, rule->odp_actions, rule->n_odp_actions,
            total_bytes - rule->accounted_bytes, ofproto->aux);
        rule->accounted_bytes = total_bytes;
    }
}

static void
rule_uninstall(struct ofproto *p, struct rule *rule)
{
    assert(!rule->cr.wc.wildcards);
    if (rule->installed) {
        struct odp_flow odp_flow;

        odp_flow.key = rule->cr.flow;
        odp_flow.actions = NULL;
        odp_flow.n_actions = 0;
        if (!dpif_flow_del(p->dpif, &odp_flow)) {
            update_stats(p, rule, &odp_flow.stats);
        }
        rule->installed = false;

        rule_post_uninstall(p, rule);
    }
}

static bool
is_controller_rule(struct rule *rule)
{
    /* If the only action is to send to the controller then don't report
     * NetFlow expiration messages since it is just part of the control
     * logic for the network and not real traffic. */

    if (rule && rule->super) {
        struct rule *super = rule->super;

        return super->n_actions == 1 &&
            super->actions[0].type == htons(OFPAT_OUTPUT) &&
            super->actions[0].output.port == htons(OFPP_CONTROLLER);
    }

    return false;
}

static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
    struct rule *super = rule->super;

    rule_account(ofproto, rule, 0);

    if (ofproto->netflow && !is_controller_rule(rule)) {
        struct ofexpired expired;
        expired.flow = rule->cr.flow;
        expired.packet_count = rule->packet_count;
        expired.byte_count = rule->byte_count;
        expired.used = rule->used;
        netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
    }
    if (super) {
        super->packet_count += rule->packet_count;
        super->byte_count += rule->byte_count;

        /* Reset counters to prevent double counting if the rule ever gets
         * reinstalled. */
        rule->packet_count = 0;
        rule->byte_count = 0;
        rule->accounted_bytes = 0;

        netflow_flow_clear(&rule->nf_flow);
    }
}

static void
queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
         struct rconn_packet_counter *counter)
{
    update_openflow_length(msg);
    if (rconn_send(ofconn->rconn, msg, counter)) {
        ofpbuf_delete(msg);
    }
}

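/* Sends an OFPT_ERROR message to 'ofconn', echoing back 'len' bytes of
 * 'data'.  'error' packs the OpenFlow error type into its high 16 bits and
 * the error code into its low 16 bits; a value with no type set is logged
 * and dropped instead of being sent. */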
static void
send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
           int error, const void *data, size_t len)
{
    struct ofpbuf *buf;
    struct ofp_error_msg *oem;

    if (!(error >> 16)) {
        VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
                     error);
        return;
    }

    COVERAGE_INC(ofproto_error);
    oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
                            oh ? oh->xid : 0, &buf);
    oem->type = htons((unsigned int) error >> 16);
    oem->code = htons(error & 0xffff);
    memcpy(oem->data, data, len);
    queue_tx(buf, ofconn, ofconn->reply_counter);
}

static void
send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
              int error)
{
    size_t oh_length = ntohs(oh->length);
    send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
}

static void
hton_ofp_phy_port(struct ofp_phy_port *opp)
{
    opp->port_no = htons(opp->port_no);
    opp->config = htonl(opp->config);
    opp->state = htonl(opp->state);
    opp->curr = htonl(opp->curr);
    opp->advertised = htonl(opp->advertised);
    opp->supported = htonl(opp->supported);
    opp->peer = htonl(opp->peer);
}

static int
handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
{
    struct ofp_header *rq = oh;
    queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
    return 0;
}

static int
handle_features_request(struct ofproto *p, struct ofconn *ofconn,
                        struct ofp_header *oh)
{
    struct ofp_switch_features *osf;
    struct ofpbuf *buf;
    unsigned int port_no;
    struct ofport *port;

    osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
    osf->datapath_id = htonll(p->datapath_id);
    osf->n_buffers = htonl(pktbuf_capacity());
    osf->n_tables = 2;
    osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
                              OFPC_PORT_STATS | OFPC_MULTI_PHY_TX);
    osf->actions = htonl((1u << OFPAT_OUTPUT) |
                         (1u << OFPAT_SET_VLAN_VID) |
                         (1u << OFPAT_SET_VLAN_PCP) |
                         (1u << OFPAT_STRIP_VLAN) |
                         (1u << OFPAT_SET_DL_SRC) |
                         (1u << OFPAT_SET_DL_DST) |
                         (1u << OFPAT_SET_NW_SRC) |
                         (1u << OFPAT_SET_NW_DST) |
                         (1u << OFPAT_SET_TP_SRC) |
                         (1u << OFPAT_SET_TP_DST));

    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
    }

    queue_tx(buf, ofconn, ofconn->reply_counter);
    return 0;
}

static int
handle_get_config_request(struct ofproto *p, struct ofconn *ofconn,
                          struct ofp_header *oh)
{
    struct ofpbuf *buf;
    struct ofp_switch_config *osc;
    uint16_t flags;
    bool drop_frags;

    /* Figure out flags. */
    dpif_get_drop_frags(p->dpif, &drop_frags);
    flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
    if (ofconn->send_flow_exp) {
        flags |= OFPC_SEND_FLOW_EXP;
    }

    /* Send reply. */
    osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
    osc->flags = htons(flags);
    osc->miss_send_len = htons(ofconn->miss_send_len);
    queue_tx(buf, ofconn, ofconn->reply_counter);

    return 0;
}

static int
handle_set_config(struct ofproto *p, struct ofconn *ofconn,
                  struct ofp_switch_config *osc)
{
    uint16_t flags;
    int error;

    error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
    if (error) {
        return error;
    }
    flags = ntohs(osc->flags);

    ofconn->send_flow_exp = (flags & OFPC_SEND_FLOW_EXP) != 0;

    if (ofconn == p->controller) {
        switch (flags & OFPC_FRAG_MASK) {
        case OFPC_FRAG_NORMAL:
            dpif_set_drop_frags(p->dpif, false);
            break;
        case OFPC_FRAG_DROP:
            dpif_set_drop_frags(p->dpif, true);
            break;
        default:
            VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
                         osc->flags);
            break;
        }
    }

    if ((ntohs(osc->miss_send_len) != 0) != (ofconn->miss_send_len != 0)) {
        if (ntohs(osc->miss_send_len) != 0) {
            ofconn->pktbuf = pktbuf_create();
        } else {
            pktbuf_destroy(ofconn->pktbuf);
        }
    }

    ofconn->miss_send_len = ntohs(osc->miss_send_len);

    return 0;
}

1975static void
6a07af36
JG
1976add_output_group_action(struct odp_actions *actions, uint16_t group,
1977 uint16_t *nf_output_iface)
064af421
BP
1978{
1979 odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
6a07af36
JG
1980
1981 if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
1982 *nf_output_iface = NF_OUT_FLOOD;
1983 }
064af421
BP
1984}
1985
1986static void
1987add_controller_action(struct odp_actions *actions,
1988 const struct ofp_action_output *oao)
1989{
1990 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
1991 a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
1992}
1993
1994struct action_xlate_ctx {
1995 /* Input. */
1996 const flow_t *flow; /* Flow to which these actions correspond. */
1997 int recurse; /* Recursion level, via xlate_table_action. */
1998 struct ofproto *ofproto;
1999 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
2000 * null pointer if we are revalidating
2001 * without a packet to refer to. */
2002
2003 /* Output. */
2004 struct odp_actions *out; /* Datapath actions. */
2005 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
d6fbec6d 2006 bool may_set_up_flow; /* True ordinarily; false if the actions must
064af421 2007 * be reassessed for every packet. */
6a07af36 2008 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
064af421
BP
2009};
2010
2011static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2012 struct action_xlate_ctx *ctx);
2013
2014static void
2015add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2016{
2017 const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
6cfaf517
BP
2018
2019 if (ofport) {
2020 if (ofport->opp.config & OFPPC_NO_FWD) {
2021 /* Forwarding disabled on port. */
2022 return;
2023 }
2024 } else {
2025 /*
2026 * We don't have an ofport record for this port, but it doesn't hurt to
2027 * allow forwarding to it anyhow. Maybe such a port will appear later
2028 * and we're pre-populating the flow table.
2029 */
064af421 2030 }
6cfaf517
BP
2031
2032 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
6a07af36 2033 ctx->nf_output_iface = port;
064af421
BP
2034}
2035
2036static struct rule *
2037lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
2038{
2039 struct rule *rule;
2040 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
2041
2042 /* The rule we found might not be valid, since we could be in need of
2043 * revalidation. If it is not valid, don't return it. */
2044 if (rule
2045 && rule->super
2046 && ofproto->need_revalidate
2047 && !revalidate_rule(ofproto, rule)) {
2048 COVERAGE_INC(ofproto_invalidated);
2049 return NULL;
2050 }
2051
2052 return rule;
2053}
2054
2055static void
2056xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2057{
2058 if (!ctx->recurse) {
2059 struct rule *rule;
2060 flow_t flow;
2061
2062 flow = *ctx->flow;
2063 flow.in_port = in_port;
2064
2065 rule = lookup_valid_rule(ctx->ofproto, &flow);
2066 if (rule) {
2067 if (rule->super) {
2068 rule = rule->super;
2069 }
2070
2071 ctx->recurse++;
2072 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2073 ctx->recurse--;
2074 }
2075 }
2076}
2077
2078static void
2079xlate_output_action(struct action_xlate_ctx *ctx,
2080 const struct ofp_action_output *oao)
2081{
2082 uint16_t odp_port;
6a07af36
JG
2083 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2084
2085 ctx->nf_output_iface = NF_OUT_DROP;
064af421
BP
2086
2087 switch (ntohs(oao->port)) {
2088 case OFPP_IN_PORT:
2089 add_output_action(ctx, ctx->flow->in_port);
2090 break;
2091 case OFPP_TABLE:
2092 xlate_table_action(ctx, ctx->flow->in_port);
2093 break;
2094 case OFPP_NORMAL:
2095 if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet,
2096 ctx->out, ctx->tags,
6a07af36 2097 &ctx->nf_output_iface,
064af421
BP
2098 ctx->ofproto->aux)) {
2099 COVERAGE_INC(ofproto_uninstallable);
d6fbec6d 2100 ctx->may_set_up_flow = false;
064af421
BP
2101 }
2102 break;
2103 case OFPP_FLOOD:
6a07af36
JG
2104 add_output_group_action(ctx->out, DP_GROUP_FLOOD,
2105 &ctx->nf_output_iface);
064af421
BP
2106 break;
2107 case OFPP_ALL:
6a07af36 2108 add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
064af421
BP
2109 break;
2110 case OFPP_CONTROLLER:
2111 add_controller_action(ctx->out, oao);
2112 break;
2113 case OFPP_LOCAL:
2114 add_output_action(ctx, ODPP_LOCAL);
2115 break;
2116 default:
2117 odp_port = ofp_port_to_odp_port(ntohs(oao->port));
2118 if (odp_port != ctx->flow->in_port) {
2119 add_output_action(ctx, odp_port);
2120 }
2121 break;
2122 }
6a07af36
JG
2123
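/* Merge this action's NetFlow output interface with what the earlier
 * actions produced: once the flow has flooded we stay at NF_OUT_FLOOD;
 * if this action produced no output, keep the previous value; and if both
 * the earlier actions and this one produced output (and the new one is not
 * a flood), collapse to NF_OUT_MULTI. */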
2124 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2125 ctx->nf_output_iface = NF_OUT_FLOOD;
2126 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2127 ctx->nf_output_iface = prev_nf_output_iface;
2128 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2129 ctx->nf_output_iface != NF_OUT_FLOOD) {
2130 ctx->nf_output_iface = NF_OUT_MULTI;
2131 }
064af421
BP
2132}
2133
2134static void
2135xlate_nicira_action(struct action_xlate_ctx *ctx,
2136 const struct nx_action_header *nah)
2137{
2138 const struct nx_action_resubmit *nar;
2139 int subtype = ntohs(nah->subtype);
2140
2141 assert(nah->vendor == htonl(NX_VENDOR_ID));
2142 switch (subtype) {
2143 case NXAST_RESUBMIT:
2144 nar = (const struct nx_action_resubmit *) nah;
2145 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2146 break;
2147
2148 default:
2149 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2150 break;
2151 }
2152}
2153
2154static void
2155do_xlate_actions(const union ofp_action *in, size_t n_in,
2156 struct action_xlate_ctx *ctx)
2157{
2158 struct actions_iterator iter;
2159 const union ofp_action *ia;
2160 const struct ofport *port;
2161
2162 port = port_array_get(&ctx->ofproto->ports, ctx->flow->in_port);
2163 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
2164 port->opp.config & (eth_addr_equals(ctx->flow->dl_dst, stp_eth_addr)
2165 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2166 /* Drop this flow. */
2167 return;
2168 }
2169
2170 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2171 uint16_t type = ntohs(ia->type);
2172 union odp_action *oa;
2173
2174 switch (type) {
2175 case OFPAT_OUTPUT:
2176 xlate_output_action(ctx, &ia->output);
2177 break;
2178
2179 case OFPAT_SET_VLAN_VID:
2180 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID);
2181 oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid;
2182 break;
2183
2184 case OFPAT_SET_VLAN_PCP:
2185 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP);
2186 oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp;
2187 break;
2188
2189 case OFPAT_STRIP_VLAN:
2190 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
2191 break;
2192
2193 case OFPAT_SET_DL_SRC:
2194 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2195 memcpy(oa->dl_addr.dl_addr,
2196 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2197 break;
2198
2199 case OFPAT_SET_DL_DST:
2200 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2201 memcpy(oa->dl_addr.dl_addr,
2202 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
2203 break;
2204
2205 case OFPAT_SET_NW_SRC:
2206 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
2207 oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2208 break;
2209
2d70a31a
JP
2210 case OFPAT_SET_NW_DST:
2211 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
2212 oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2213 break;
2214
064af421
BP
2215 case OFPAT_SET_TP_SRC:
2216 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
2217 oa->tp_port.tp_port = ia->tp_port.tp_port;
2218 break;
2219
2d70a31a
JP
2220 case OFPAT_SET_TP_DST:
2221 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
2222 oa->tp_port.tp_port = ia->tp_port.tp_port;
2223 break;
2224
064af421
BP
2225 case OFPAT_VENDOR:
2226 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2227 break;
2228
2229 default:
2230 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2231 break;
2232 }
2233 }
2234}
2235
2236static int
2237xlate_actions(const union ofp_action *in, size_t n_in,
2238 const flow_t *flow, struct ofproto *ofproto,
2239 const struct ofpbuf *packet,
6a07af36
JG
2240 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2241 uint16_t *nf_output_iface)
064af421
BP
2242{
2243 tag_type no_tags = 0;
2244 struct action_xlate_ctx ctx;
2245 COVERAGE_INC(ofproto_ofp2odp);
2246 odp_actions_init(out);
2247 ctx.flow = flow;
2248 ctx.recurse = 0;
2249 ctx.ofproto = ofproto;
2250 ctx.packet = packet;
2251 ctx.out = out;
2252 ctx.tags = tags ? tags : &no_tags;
d6fbec6d 2253 ctx.may_set_up_flow = true;
6a07af36 2254 ctx.nf_output_iface = NF_OUT_DROP;
064af421 2255 do_xlate_actions(in, n_in, &ctx);
0ad9b732 2256
d6fbec6d 2257 /* Check with in-band control to see if we're allowed to set up this
0ad9b732
JP
2258 * flow. */
2259 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
d6fbec6d 2260 ctx.may_set_up_flow = false;
0ad9b732
JP
2261 }
2262
d6fbec6d
BP
2263 if (may_set_up_flow) {
2264 *may_set_up_flow = ctx.may_set_up_flow;
064af421 2265 }
6a07af36
JG
2266 if (nf_output_iface) {
2267 *nf_output_iface = ctx.nf_output_iface;
064af421
BP
2268 }
2269 if (odp_actions_overflow(out)) {
2270 odp_actions_init(out);
2271 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2272 }
2273 return 0;
2274}
2275
2276static int
2277handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
2278 struct ofp_header *oh)
2279{
2280 struct ofp_packet_out *opo;
2281 struct ofpbuf payload, *buffer;
2282 struct odp_actions actions;
2283 int n_actions;
2284 uint16_t in_port;
2285 flow_t flow;
2286 int error;
2287
2288 error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports);
2289 if (error) {
2290 return error;
2291 }
2292 opo = (struct ofp_packet_out *) oh;
2293
2294 COVERAGE_INC(ofproto_packet_out);
2295 if (opo->buffer_id != htonl(UINT32_MAX)) {
2296 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2297 &buffer, &in_port);
7778bd15 2298 if (error || !buffer) {
064af421
BP
2299 return error;
2300 }
2301 payload = *buffer;
2302 } else {
2303 buffer = NULL;
2304 }
2305
2306 flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
2307 error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
6a07af36 2308 &flow, p, &payload, &actions, NULL, NULL, NULL);
064af421
BP
2309 if (error) {
2310 return error;
2311 }
2312
c228a364 2313 dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions,
064af421
BP
2314 &payload);
2315 ofpbuf_delete(buffer);
2316
2317 return 0;
2318}
2319
2320static void
2321update_port_config(struct ofproto *p, struct ofport *port,
2322 uint32_t config, uint32_t mask)
2323{
2324 mask &= config ^ port->opp.config;
2325 if (mask & OFPPC_PORT_DOWN) {
2326 if (config & OFPPC_PORT_DOWN) {
2327 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
2328 } else {
2329 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
2330 }
2331 }
2332#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
2333 if (mask & REVALIDATE_BITS) {
2334 COVERAGE_INC(ofproto_costly_flags);
2335 port->opp.config ^= mask & REVALIDATE_BITS;
2336 p->need_revalidate = true;
2337 }
2338#undef REVALIDATE_BITS
2339 if (mask & OFPPC_NO_FLOOD) {
2340 port->opp.config ^= OFPPC_NO_FLOOD;
72b06300 2341 refresh_port_groups(p);
064af421
BP
2342 }
2343 if (mask & OFPPC_NO_PACKET_IN) {
2344 port->opp.config ^= OFPPC_NO_PACKET_IN;
2345 }
2346}
2347
2348static int
2349handle_port_mod(struct ofproto *p, struct ofp_header *oh)
2350{
2351 const struct ofp_port_mod *opm;
2352 struct ofport *port;
2353 int error;
2354
2355 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
2356 if (error) {
2357 return error;
2358 }
2359 opm = (struct ofp_port_mod *) oh;
2360
2361 port = port_array_get(&p->ports,
2362 ofp_port_to_odp_port(ntohs(opm->port_no)));
2363 if (!port) {
2364 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
2365 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
2366 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
2367 } else {
2368 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
2369 if (opm->advertise) {
2370 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
2371 }
2372 }
2373 return 0;
2374}
2375
2376static struct ofpbuf *
2377make_stats_reply(uint32_t xid, uint16_t type, size_t body_len)
2378{
2379 struct ofp_stats_reply *osr;
2380 struct ofpbuf *msg;
2381
2382 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
2383 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
2384 osr->type = type;
2385 osr->flags = htons(0);
2386 return msg;
2387}
2388
2389static struct ofpbuf *
2390start_stats_reply(const struct ofp_stats_request *request, size_t body_len)
2391{
2392 return make_stats_reply(request->header.xid, request->type, body_len);
2393}
2394
2395static void *
2396append_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofpbuf **msgp)
2397{
2398 struct ofpbuf *msg = *msgp;
2399 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
2400 if (nbytes + msg->size > UINT16_MAX) {
2401 struct ofp_stats_reply *reply = msg->data;
2402 reply->flags = htons(OFPSF_REPLY_MORE);
2403 *msgp = make_stats_reply(reply->header.xid, reply->type, nbytes);
2404 queue_tx(msg, ofconn, ofconn->reply_counter);
2405 }
2406 return ofpbuf_put_uninit(*msgp, nbytes);
2407}
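/* Typical usage, as in the stats handlers below: build the header with
 * start_stats_reply(), call append_stats_reply() once per record (when the
 * 64 kB OpenFlow length limit would be exceeded it sets OFPSF_REPLY_MORE on
 * the current message, queues it, and starts a fresh one), and finally
 * queue_tx() the last buffer.  A rough sketch:
 *
 *     msg = start_stats_reply(request, sizeof *ods);
 *     ods = append_stats_reply(sizeof *ods, ofconn, &msg);
 *     ...fill in *ods...
 *     queue_tx(msg, ofconn, ofconn->reply_counter);
 */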
2408
2409static int
2410handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn,
2411 struct ofp_stats_request *request)
2412{
2413 struct ofp_desc_stats *ods;
2414 struct ofpbuf *msg;
2415
2416 msg = start_stats_reply(request, sizeof *ods);
2417 ods = append_stats_reply(sizeof *ods, ofconn, &msg);
2418 strncpy(ods->mfr_desc, p->manufacturer, sizeof ods->mfr_desc);
2419 strncpy(ods->hw_desc, p->hardware, sizeof ods->hw_desc);
2420 strncpy(ods->sw_desc, p->software, sizeof ods->sw_desc);
2421 strncpy(ods->serial_num, p->serial, sizeof ods->serial_num);
2422 queue_tx(msg, ofconn, ofconn->reply_counter);
2423
2424 return 0;
2425}
2426
2427static void
2428count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
2429{
2430 struct rule *rule = rule_from_cls_rule(cls_rule);
2431 int *n_subrules = n_subrules_;
2432
2433 if (rule->super) {
2434 (*n_subrules)++;
2435 }
2436}
2437
2438static int
2439handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
2440 struct ofp_stats_request *request)
2441{
2442 struct ofp_table_stats *ots;
2443 struct ofpbuf *msg;
2444 struct odp_stats dpstats;
2445 int n_exact, n_subrules, n_wild;
2446
2447 msg = start_stats_reply(request, sizeof *ots * 2);
2448
2449 /* Count rules of various kinds. */
2450 n_subrules = 0;
2451 classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
2452 n_exact = classifier_count_exact(&p->cls) - n_subrules;
2453 n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
2454
2455 /* Hash table. */
c228a364 2456 dpif_get_dp_stats(p->dpif, &dpstats);
064af421
BP
2457 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2458 memset(ots, 0, sizeof *ots);
2459 ots->table_id = TABLEID_HASH;
2460 strcpy(ots->name, "hash");
2461 ots->wildcards = htonl(0);
2462 ots->max_entries = htonl(dpstats.max_capacity);
2463 ots->active_count = htonl(n_exact);
2464 ots->lookup_count = htonll(dpstats.n_frags + dpstats.n_hit +
2465 dpstats.n_missed);
2466 ots->matched_count = htonll(dpstats.n_hit); /* XXX */
2467
2468 /* Classifier table. */
2469 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2470 memset(ots, 0, sizeof *ots);
2471 ots->table_id = TABLEID_CLASSIFIER;
2472 strcpy(ots->name, "classifier");
2473 ots->wildcards = htonl(OFPFW_ALL);
2474 ots->max_entries = htonl(65536);
2475 ots->active_count = htonl(n_wild);
2476 ots->lookup_count = htonll(0); /* XXX */
2477 ots->matched_count = htonll(0); /* XXX */
2478
2479 queue_tx(msg, ofconn, ofconn->reply_counter);
2480 return 0;
2481}
2482
2483static int
2484handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn,
2485 struct ofp_stats_request *request)
2486{
2487 struct ofp_port_stats *ops;
2488 struct ofpbuf *msg;
2489 struct ofport *port;
2490 unsigned int port_no;
2491
2492 msg = start_stats_reply(request, sizeof *ops * 16);
2493 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
2494 struct netdev_stats stats;
2495
2496 /* Intentionally ignore return value, since errors will set 'stats' to
2497 * all-1s, which is correct for OpenFlow, and netdev_get_stats() will
2498 * log errors. */
2499 netdev_get_stats(port->netdev, &stats);
2500
2501 ops = append_stats_reply(sizeof *ops, ofconn, &msg);
2502 ops->port_no = htons(odp_port_to_ofp_port(port_no));
2503 memset(ops->pad, 0, sizeof ops->pad);
2504 ops->rx_packets = htonll(stats.rx_packets);
2505 ops->tx_packets = htonll(stats.tx_packets);
2506 ops->rx_bytes = htonll(stats.rx_bytes);
2507 ops->tx_bytes = htonll(stats.tx_bytes);
2508 ops->rx_dropped = htonll(stats.rx_dropped);
2509 ops->tx_dropped = htonll(stats.tx_dropped);
2510 ops->rx_errors = htonll(stats.rx_errors);
2511 ops->tx_errors = htonll(stats.tx_errors);
2512 ops->rx_frame_err = htonll(stats.rx_frame_errors);
2513 ops->rx_over_err = htonll(stats.rx_over_errors);
2514 ops->rx_crc_err = htonll(stats.rx_crc_errors);
2515 ops->collisions = htonll(stats.collisions);
2516 }
2517
2518 queue_tx(msg, ofconn, ofconn->reply_counter);
2519 return 0;
2520}
2521
2522struct flow_stats_cbdata {
2523 struct ofproto *ofproto;
2524 struct ofconn *ofconn;
2525 uint16_t out_port;
2526 struct ofpbuf *msg;
2527};
2528
2529static void
2530query_stats(struct ofproto *p, struct rule *rule,
2531 uint64_t *packet_countp, uint64_t *byte_countp)
2532{
2533 uint64_t packet_count, byte_count;
2534 struct rule *subrule;
2535 struct odp_flow *odp_flows;
2536 size_t n_odp_flows;
2537
b3137fe8
JG
2538 packet_count = rule->packet_count;
2539 byte_count = rule->byte_count;
2540
064af421
BP
2541 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
2542 odp_flows = xcalloc(1, n_odp_flows * sizeof *odp_flows);
2543 if (rule->cr.wc.wildcards) {
2544 size_t i = 0;
2545 LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
2546 odp_flows[i++].key = subrule->cr.flow;
b3137fe8
JG
2547 packet_count += subrule->packet_count;
2548 byte_count += subrule->byte_count;
064af421
BP
2549 }
2550 } else {
2551 odp_flows[0].key = rule->cr.flow;
2552 }
2553
c228a364 2556 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
064af421
BP
2557 size_t i;
2558 for (i = 0; i < n_odp_flows; i++) {
2559 struct odp_flow *odp_flow = &odp_flows[i];
2560 packet_count += odp_flow->stats.n_packets;
2561 byte_count += odp_flow->stats.n_bytes;
2562 }
2563 }
2564 free(odp_flows);
2565
2566 *packet_countp = packet_count;
2567 *byte_countp = byte_count;
2568}
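/* Stats model used by query_stats(): rule->packet_count and rule->byte_count
 * hold totals already folded in from datapath flows that have since been
 * removed (and, for a wildcarded rule, its subrules' counters), while
 * dpif_flow_get_multiple() adds the counters of the flows currently
 * installed in the datapath.  The sum is what is reported to the
 * controller. */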
2569
2570static void
2571flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
2572{
2573 struct rule *rule = rule_from_cls_rule(rule_);
2574 struct flow_stats_cbdata *cbdata = cbdata_;
2575 struct ofp_flow_stats *ofs;
2576 uint64_t packet_count, byte_count;
2577 size_t act_len, len;
2578
2579 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2580 return;
2581 }
2582
2583 act_len = sizeof *rule->actions * rule->n_actions;
2584 len = offsetof(struct ofp_flow_stats, actions) + act_len;
2585
2586 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2587
2588 ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
2589 ofs->length = htons(len);
2590 ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
2591 ofs->pad = 0;
2592 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofs->match);
2593 ofs->duration = htonl((time_msec() - rule->created) / 1000);
2594 ofs->priority = htons(rule->cr.priority);
2595 ofs->idle_timeout = htons(rule->idle_timeout);
2596 ofs->hard_timeout = htons(rule->hard_timeout);
2597 memset(ofs->pad2, 0, sizeof ofs->pad2);
2598 ofs->packet_count = htonll(packet_count);
2599 ofs->byte_count = htonll(byte_count);
2600 memcpy(ofs->actions, rule->actions, act_len);
2601}
2602
2603static int
2604table_id_to_include(uint8_t table_id)
2605{
2606 return (table_id == TABLEID_HASH ? CLS_INC_EXACT
2607 : table_id == TABLEID_CLASSIFIER ? CLS_INC_WILD
2608 : table_id == 0xff ? CLS_INC_ALL
2609 : 0);
2610}
2611
2612static int
2613handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn,
2614 const struct ofp_stats_request *osr,
2615 size_t arg_size)
2616{
2617 struct ofp_flow_stats_request *fsr;
2618 struct flow_stats_cbdata cbdata;
2619 struct cls_rule target;
2620
2621 if (arg_size != sizeof *fsr) {
2622 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
2623 }
2624 fsr = (struct ofp_flow_stats_request *) osr->body;
2625
2626 COVERAGE_INC(ofproto_flows_req);
2627 cbdata.ofproto = p;
2628 cbdata.ofconn = ofconn;
2629 cbdata.out_port = fsr->out_port;
2630 cbdata.msg = start_stats_reply(osr, 1024);
2631 cls_rule_from_match(&target, &fsr->match, 0);
2632 classifier_for_each_match(&p->cls, &target,
2633 table_id_to_include(fsr->table_id),
2634 flow_stats_cb, &cbdata);
2635 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
2636 return 0;
2637}
2638
4f2cad2c
JP
2639struct flow_stats_ds_cbdata {
2640 struct ofproto *ofproto;
2641 struct ds *results;
2642};
2643
2644static void
2645flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
2646{
2647 struct rule *rule = rule_from_cls_rule(rule_);
2648 struct flow_stats_ds_cbdata *cbdata = cbdata_;
2649 struct ds *results = cbdata->results;
2650 struct ofp_match match;
2651 uint64_t packet_count, byte_count;
2652 size_t act_len = sizeof *rule->actions * rule->n_actions;
2653
2654 /* Don't report on subrules. */
2655 if (rule->super != NULL) {
2656 return;
2657 }
2658
2659 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
a26ef517 2660 flow_to_ovs_match(&rule->cr.flow, rule->cr.wc.wildcards, &match);
4f2cad2c
JP
2661
2662 ds_put_format(results, "duration=%llds, ",
2663 (time_msec() - rule->created) / 1000);
52ae00b3 2664 ds_put_format(results, "priority=%u, ", rule->cr.priority);
4f2cad2c
JP
2665 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
2666 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
2667 ofp_print_match(results, &match, true);
2668 ofp_print_actions(results, &rule->actions->header, act_len);
2669 ds_put_cstr(results, "\n");
2670}
2671
2672/* Adds a pretty-printed description of all flows to 'results', including
2673 * those marked hidden by secchan (e.g., by in-band control). */
2674void
2675ofproto_get_all_flows(struct ofproto *p, struct ds *results)
2676{
2677 struct ofp_match match;
2678 struct cls_rule target;
2679 struct flow_stats_ds_cbdata cbdata;
2680
2681 memset(&match, 0, sizeof match);
2682 match.wildcards = htonl(OFPFW_ALL);
2683
2684 cbdata.ofproto = p;
2685 cbdata.results = results;
2686
2687 cls_rule_from_match(&target, &match, 0);
2688 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2689 flow_stats_ds_cb, &cbdata);
2690}
2691
064af421
BP
2692struct aggregate_stats_cbdata {
2693 struct ofproto *ofproto;
2694 uint16_t out_port;
2695 uint64_t packet_count;
2696 uint64_t byte_count;
2697 uint32_t n_flows;
2698};
2699
2700static void
2701aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
2702{
2703 struct rule *rule = rule_from_cls_rule(rule_);
2704 struct aggregate_stats_cbdata *cbdata = cbdata_;
2705 uint64_t packet_count, byte_count;
2706
2707 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2708 return;
2709 }
2710
2711 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2712
2713 cbdata->packet_count += packet_count;
2714 cbdata->byte_count += byte_count;
2715 cbdata->n_flows++;
2716}
2717
2718static int
2719handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn,
2720 const struct ofp_stats_request *osr,
2721 size_t arg_size)
2722{
2723 struct ofp_aggregate_stats_request *asr;
2724 struct ofp_aggregate_stats_reply *reply;
2725 struct aggregate_stats_cbdata cbdata;
2726 struct cls_rule target;
2727 struct ofpbuf *msg;
2728
2729 if (arg_size != sizeof *asr) {
2730 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
2731 }
2732 asr = (struct ofp_aggregate_stats_request *) osr->body;
2733
2734 COVERAGE_INC(ofproto_agg_request);
2735 cbdata.ofproto = p;
2736 cbdata.out_port = asr->out_port;
2737 cbdata.packet_count = 0;
2738 cbdata.byte_count = 0;
2739 cbdata.n_flows = 0;
2740 cls_rule_from_match(&target, &asr->match, 0);
2741 classifier_for_each_match(&p->cls, &target,
2742 table_id_to_include(asr->table_id),
2743 aggregate_stats_cb, &cbdata);
2744
2745 msg = start_stats_reply(osr, sizeof *reply);
2746 reply = append_stats_reply(sizeof *reply, ofconn, &msg);
2747 reply->flow_count = htonl(cbdata.n_flows);
2748 reply->packet_count = htonll(cbdata.packet_count);
2749 reply->byte_count = htonll(cbdata.byte_count);
2750 queue_tx(msg, ofconn, ofconn->reply_counter);
2751 return 0;
2752}
2753
2754static int
2755handle_stats_request(struct ofproto *p, struct ofconn *ofconn,
2756 struct ofp_header *oh)
2757{
2758 struct ofp_stats_request *osr;
2759 size_t arg_size;
2760 int error;
2761
2762 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
2763 1, &arg_size);
2764 if (error) {
2765 return error;
2766 }
2767 osr = (struct ofp_stats_request *) oh;
2768
2769 switch (ntohs(osr->type)) {
2770 case OFPST_DESC:
2771 return handle_desc_stats_request(p, ofconn, osr);
2772
2773 case OFPST_FLOW:
2774 return handle_flow_stats_request(p, ofconn, osr, arg_size);
2775
2776 case OFPST_AGGREGATE:
2777 return handle_aggregate_stats_request(p, ofconn, osr, arg_size);
2778
2779 case OFPST_TABLE:
2780 return handle_table_stats_request(p, ofconn, osr);
2781
2782 case OFPST_PORT:
2783 return handle_port_stats_request(p, ofconn, osr);
2784
2785 case OFPST_VENDOR:
2786 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
2787
2788 default:
2789 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
2790 }
2791}
2792
2793static long long int
2794msec_from_nsec(uint64_t sec, uint32_t nsec)
2795{
2796 return !sec ? 0 : sec * 1000 + nsec / 1000000;
2797}
2798
2799static void
0193b2af
JG
2800update_time(struct ofproto *ofproto, struct rule *rule,
2801 const struct odp_flow_stats *stats)
064af421
BP
2802{
2803 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
2804 if (used > rule->used) {
2805 rule->used = used;
4836f9f2
JP
2806 if (rule->super && used > rule->super->used) {
2807 rule->super->used = used;
2808 }
0193b2af 2809 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
064af421
BP
2810 }
2811}
2812
2813static void
0193b2af
JG
2814update_stats(struct ofproto *ofproto, struct rule *rule,
2815 const struct odp_flow_stats *stats)
064af421 2816{
064af421 2817 if (stats->n_packets) {
0193b2af
JG
2818 update_time(ofproto, rule, stats);
2819 rule->packet_count += stats->n_packets;
2820 rule->byte_count += stats->n_bytes;
2821 netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
2822 stats->tcp_flags);
064af421
BP
2823 }
2824}
2825
2826static int
2827add_flow(struct ofproto *p, struct ofconn *ofconn,
2828 struct ofp_flow_mod *ofm, size_t n_actions)
2829{
2830 struct ofpbuf *packet;
2831 struct rule *rule;
2832 uint16_t in_port;
2833 int error;
2834
0193b2af 2835 rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
064af421
BP
2836 n_actions, ntohs(ofm->idle_timeout),
2837 ntohs(ofm->hard_timeout));
2838 cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
2839
2840 packet = NULL;
2841 error = 0;
2842 if (ofm->buffer_id != htonl(UINT32_MAX)) {
2843 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
2844 &packet, &in_port);
2845 }
2846
2847 rule_insert(p, rule, packet, in_port);
2848 ofpbuf_delete(packet);
2849 return error;
2850}
2851
2852static int
2853modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
2854 size_t n_actions, uint16_t command, struct rule *rule)
2855{
2856 if (rule_is_hidden(rule)) {
2857 return 0;
2858 }
2859
2860 if (command == OFPFC_DELETE) {
2861 rule_remove(p, rule);
2862 } else {
2863 size_t actions_len = n_actions * sizeof *rule->actions;
2864
2865 if (n_actions == rule->n_actions
2866 && !memcmp(ofm->actions, rule->actions, actions_len))
2867 {
2868 return 0;
2869 }
2870
2871 free(rule->actions);
2872 rule->actions = xmemdup(ofm->actions, actions_len);
2873 rule->n_actions = n_actions;
2874
2875 if (rule->cr.wc.wildcards) {
2876 COVERAGE_INC(ofproto_mod_wc_flow);
2877 p->need_revalidate = true;
2878 } else {
2879 rule_update_actions(p, rule);
2880 }
2881 }
2882
2883 return 0;
2884}
2885
2886static int
2887modify_flows_strict(struct ofproto *p, const struct ofp_flow_mod *ofm,
2888 size_t n_actions, uint16_t command)
2889{
2890 struct rule *rule;
2891 uint32_t wildcards;
2892 flow_t flow;
2893
2894 flow_from_match(&flow, &wildcards, &ofm->match);
2895 rule = rule_from_cls_rule(classifier_find_rule_exactly(
2896 &p->cls, &flow, wildcards,
2897 ntohs(ofm->priority)));
2898
2899 if (rule) {
2900 if (command == OFPFC_DELETE
2901 && ofm->out_port != htons(OFPP_NONE)
2902 && !rule_has_out_port(rule, ofm->out_port)) {
2903 return 0;
2904 }
2905
2906 modify_flow(p, ofm, n_actions, command, rule);
2907 }
2908 return 0;
2909}
2910
2911struct modify_flows_cbdata {
2912 struct ofproto *ofproto;
2913 const struct ofp_flow_mod *ofm;
2914 uint16_t out_port;
2915 size_t n_actions;
2916 uint16_t command;
2917};
2918
2919static void
2920modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
2921{
2922 struct rule *rule = rule_from_cls_rule(rule_);
2923 struct modify_flows_cbdata *cbdata = cbdata_;
2924
2925 if (cbdata->out_port != htons(OFPP_NONE)
2926 && !rule_has_out_port(rule, cbdata->out_port)) {
2927 return;
2928 }
2929
2930 modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions,
2931 cbdata->command, rule);
2932}
2933
2934static int
2935modify_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm,
2936 size_t n_actions, uint16_t command)
2937{
2938 struct modify_flows_cbdata cbdata;
2939 struct cls_rule target;
2940
2941 cbdata.ofproto = p;
2942 cbdata.ofm = ofm;
2943 cbdata.out_port = (command == OFPFC_DELETE ? ofm->out_port
2944 : htons(OFPP_NONE));
2945 cbdata.n_actions = n_actions;
2946 cbdata.command = command;
2947
2948 cls_rule_from_match(&target, &ofm->match, 0);
2949
2950 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
2951 modify_flows_cb, &cbdata);
2952 return 0;
2953}
2954
2955static int
2956handle_flow_mod(struct ofproto *p, struct ofconn *ofconn,
2957 struct ofp_flow_mod *ofm)
2958{
2959 size_t n_actions;
2960 int error;
2961
2962 error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm,
2963 sizeof *ofm->actions, &n_actions);
2964 if (error) {
2965 return error;
2966 }
2967
2968 normalize_match(&ofm->match);
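/* The OpenFlow priority field is meaningful only for wildcarded entries,
 * so for exact matches it is normalized to the maximum value here,
 * presumably so that exact-match entries always take precedence and so
 * that strict lookups see a consistent priority. */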
2969 if (!ofm->match.wildcards) {
2970 ofm->priority = htons(UINT16_MAX);
2971 }
2972
2973 error = validate_actions((const union ofp_action *) ofm->actions,
2974 n_actions, p->max_ports);
2975 if (error) {
2976 return error;
2977 }
2978
2979 switch (ntohs(ofm->command)) {
2980 case OFPFC_ADD:
2981 return add_flow(p, ofconn, ofm, n_actions);
2982
2983 case OFPFC_MODIFY:
2984 return modify_flows_loose(p, ofm, n_actions, OFPFC_MODIFY);
2985
2986 case OFPFC_MODIFY_STRICT:
2987 return modify_flows_strict(p, ofm, n_actions, OFPFC_MODIFY);
2988
2989 case OFPFC_DELETE:
2990 return modify_flows_loose(p, ofm, n_actions, OFPFC_DELETE);
2991
2992 case OFPFC_DELETE_STRICT:
2993 return modify_flows_strict(p, ofm, n_actions, OFPFC_DELETE);
2994
2995 default:
2996 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
2997 }
2998}
2999
3000static void
3001send_capability_reply(struct ofproto *p, struct ofconn *ofconn, uint32_t xid)
3002{
3003 struct ofmp_capability_reply *ocr;
3004 struct ofpbuf *b;
3005 char capabilities[] = "com.nicira.mgmt.manager=false\n";
3006
3007 ocr = make_openflow_xid(sizeof(*ocr), OFPT_VENDOR, xid, &b);
3008 ocr->header.header.vendor = htonl(NX_VENDOR_ID);
3009 ocr->header.header.subtype = htonl(NXT_MGMT);
3010 ocr->header.type = htons(OFMPT_CAPABILITY_REPLY);
3011
3012 ocr->format = htonl(OFMPCOF_SIMPLE);
3013 ocr->mgmt_id = htonll(p->mgmt_id);
3014
3015 ofpbuf_put(b, capabilities, strlen(capabilities));
3016
3017 queue_tx(b, ofconn, ofconn->reply_counter);
3018}
3019
3020static int
3021handle_ofmp(struct ofproto *p, struct ofconn *ofconn,
3022 struct ofmp_header *ofmph)
3023{
3024 size_t msg_len = ntohs(ofmph->header.header.length);
3025 if (msg_len < sizeof(*ofmph)) {
2886875a 3026 VLOG_WARN_RL(&rl, "dropping short management message: %zu\n", msg_len);
064af421
BP
3027 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
3028 }
3029
3030 if (ofmph->type == htons(OFMPT_CAPABILITY_REQUEST)) {
3031 struct ofmp_capability_request *ofmpcr;
3032
3033 if (msg_len < sizeof(struct ofmp_capability_request)) {
2886875a 3034 VLOG_WARN_RL(&rl, "dropping short capability request: %zu\n",
064af421
BP
3035 msg_len);
3036 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
3037 }
3038
3039 ofmpcr = (struct ofmp_capability_request *)ofmph;
3040 if (ofmpcr->format != htonl(OFMPCAF_SIMPLE)) {
3041 /* xxx Find a better type than bad subtype */
3042 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3043 }
3044
3045 send_capability_reply(p, ofconn, ofmph->header.header.xid);
3046 return 0;
3047 } else {
3048 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3049 }
3050}
3051
3052static int
3053handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
3054{
3055 struct ofp_vendor_header *ovh = msg;
3056 struct nicira_header *nh;
3057
3058 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
3059 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
3060 }
3061 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
3062 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3063 }
3064 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
3065 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
3066 }
3067
3068 nh = msg;
3069 switch (ntohl(nh->subtype)) {
3070 case NXT_STATUS_REQUEST:
3071 return switch_status_handle_request(p->switch_status, ofconn->rconn,
3072 msg);
3073
3074 case NXT_ACT_SET_CONFIG:
3075 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); /* XXX */
3076
3077 case NXT_ACT_GET_CONFIG:
3078 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); /* XXX */
3079
3080 case NXT_COMMAND_REQUEST:
3081 if (p->executer) {
3082 return executer_handle_request(p->executer, ofconn->rconn, msg);
3083 }
3084 break;
3085
3086 case NXT_MGMT:
3087 return handle_ofmp(p, ofconn, msg);
3088 }
3089
3090 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3091}
3092
3093static void
3094handle_openflow(struct ofconn *ofconn, struct ofproto *p,
3095 struct ofpbuf *ofp_msg)
3096{
3097 struct ofp_header *oh = ofp_msg->data;
3098 int error;
3099
3100 COVERAGE_INC(ofproto_recv_openflow);
3101 switch (oh->type) {
3102 case OFPT_ECHO_REQUEST:
3103 error = handle_echo_request(ofconn, oh);
3104 break;
3105
3106 case OFPT_ECHO_REPLY:
3107 error = 0;
3108 break;
3109
3110 case OFPT_FEATURES_REQUEST:
3111 error = handle_features_request(p, ofconn, oh);
3112 break;
3113
3114 case OFPT_GET_CONFIG_REQUEST:
3115 error = handle_get_config_request(p, ofconn, oh);
3116 break;
3117
3118 case OFPT_SET_CONFIG:
3119 error = handle_set_config(p, ofconn, ofp_msg->data);
3120 break;
3121
3122 case OFPT_PACKET_OUT:
3123 error = handle_packet_out(p, ofconn, ofp_msg->data);
3124 break;
3125
3126 case OFPT_PORT_MOD:
3127 error = handle_port_mod(p, oh);
3128 break;
3129
3130 case OFPT_FLOW_MOD:
3131 error = handle_flow_mod(p, ofconn, ofp_msg->data);
3132 break;
3133
3134 case OFPT_STATS_REQUEST:
3135 error = handle_stats_request(p, ofconn, oh);
3136 break;
3137
3138 case OFPT_VENDOR:
3139 error = handle_vendor(p, ofconn, ofp_msg->data);
3140 break;
3141
3142 default:
3143 if (VLOG_IS_WARN_ENABLED()) {
3144 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
3145 VLOG_DBG_RL(&rl, "OpenFlow message ignored: %s", s);
3146 free(s);
3147 }
3148 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
3149 break;
3150 }
3151
3152 if (error) {
3153 send_error_oh(ofconn, ofp_msg->data, error);
3154 }
3155}
3156\f
3157static void
72b06300 3158handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
064af421
BP
3159{
3160 struct odp_msg *msg = packet->data;
3161 uint16_t in_port = odp_port_to_ofp_port(msg->port);
3162 struct rule *rule;
3163 struct ofpbuf payload;
3164 flow_t flow;
3165
064af421
BP
3166 payload.data = msg + 1;
3167 payload.size = msg->length - sizeof *msg;
3168 flow_extract(&payload, msg->port, &flow);
3169
0ad9b732
JP
3170 /* Check with in-band control to see if this packet should be sent
3171 * to the local port regardless of the flow table. */
3172 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
3173 union odp_action action;
3174
3175 memset(&action, 0, sizeof(action));
3176 action.output.type = ODPAT_OUTPUT;
3177 action.output.port = ODPP_LOCAL;
f1acd62b 3178 dpif_execute(p->dpif, flow.in_port, &action, 1, &payload);
0ad9b732
JP
3179 }
3180
064af421
BP
3181 rule = lookup_valid_rule(p, &flow);
3182 if (!rule) {
3183 /* Don't send a packet-in if OFPPC_NO_PACKET_IN is asserted. */
3184 struct ofport *port = port_array_get(&p->ports, msg->port);
3185 if (port) {
3186 if (port->opp.config & OFPPC_NO_PACKET_IN) {
3187 COVERAGE_INC(ofproto_no_packet_in);
3188 /* XXX install 'drop' flow entry */
3189 ofpbuf_delete(packet);
3190 return;
3191 }
3192 } else {
3193 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
3194 }
3195
3196 COVERAGE_INC(ofproto_packet_in);
3197 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3198 return;
3199 }
3200
3201 if (rule->cr.wc.wildcards) {
3202 rule = rule_create_subrule(p, rule, &flow);
3203 rule_make_actions(p, rule, packet);
3204 } else {
3205 if (!rule->may_install) {
3206 /* The rule is not installable, that is, we need to process every
3207 * packet individually, so recompute the actions for the current
3208 * packet. */
3209 rule_make_actions(p, rule, packet);
3210 } else {
3211 /* XXX revalidate rule if it needs it */
3212 }
3213 }
3214
3215 rule_execute(p, rule, &payload, &flow);
3216 rule_reinstall(p, rule);
7778bd15
BP
3217
3218 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY
3219 && rconn_is_connected(p->controller->rconn)) {
3220 /*
3221 * Extra-special case for fail-open mode.
3222 *
3223 * We are in fail-open mode and the packet matched the fail-open rule,
3224 * but we are connected to a controller too. We should send the packet
3225 * up to the controller in the hope that it will try to set up a flow
3226 * and thereby allow us to exit fail-open.
3227 *
3228 * See the top-level comment in fail-open.c for more information.
3229 */
3230 pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
3231 } else {
3232 ofpbuf_delete(packet);
3233 }
064af421 3234}
72b06300
BP
3235
3236static void
3237handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
3238{
3239 struct odp_msg *msg = packet->data;
3240
3241 switch (msg->type) {
3242 case _ODPL_ACTION_NR:
3243 COVERAGE_INC(ofproto_ctlr_action);
3244 pinsched_send(p->action_sched, odp_port_to_ofp_port(msg->port), packet,
3245 send_packet_in_action, p);
3246 break;
3247
3248 case _ODPL_SFLOW_NR:
3249 if (p->sflow) {
3250 ofproto_sflow_received(p->sflow, msg);
3251 }
3252 ofpbuf_delete(packet);
3253 break;
3254
3255 case _ODPL_MISS_NR:
3256 handle_odp_miss_msg(p, packet);
3257 break;
3258
3259 default:
3260 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
3261 msg->type);
3262 break;
3263 }
3264}
064af421
BP
3265\f
3266static void
3267revalidate_cb(struct cls_rule *sub_, void *cbdata_)
3268{
3269 struct rule *sub = rule_from_cls_rule(sub_);
3270 struct revalidate_cbdata *cbdata = cbdata_;
3271
3272 if (cbdata->revalidate_all
3273 || (cbdata->revalidate_subrules && sub->super)
3274 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
3275 revalidate_rule(cbdata->ofproto, sub);
3276 }
3277}
3278
3279static bool
3280revalidate_rule(struct ofproto *p, struct rule *rule)
3281{
3282 const flow_t *flow = &rule->cr.flow;
3283
3284 COVERAGE_INC(ofproto_revalidate_rule);
3285 if (rule->super) {
3286 struct rule *super;
3287 super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
3288 if (!super) {
3289 rule_remove(p, rule);
3290 return false;
3291 } else if (super != rule->super) {
3292 COVERAGE_INC(ofproto_revalidate_moved);
3293 list_remove(&rule->list);
3294 list_push_back(&super->list, &rule->list);
3295 rule->super = super;
3296 rule->hard_timeout = super->hard_timeout;
3297 rule->idle_timeout = super->idle_timeout;
3298 rule->created = super->created;
3299 rule->used = 0;
3300 }
3301 }
3302
3303 rule_update_actions(p, rule);
3304 return true;
3305}
3306
3307static struct ofpbuf *
3308compose_flow_exp(const struct rule *rule, long long int now, uint8_t reason)
3309{
3310 struct ofp_flow_expired *ofe;
3311 struct ofpbuf *buf;
3312
3313 ofe = make_openflow(sizeof *ofe, OFPT_FLOW_EXPIRED, &buf);
3314 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofe->match);
3315 ofe->priority = htons(rule->cr.priority);
3316 ofe->reason = reason;
3d3d15a0
JP
3317 ofe->duration = htonl((now - rule->created) / 1000);
3318 ofe->packet_count = htonll(rule->packet_count);
3319 ofe->byte_count = htonll(rule->byte_count);
064af421
BP
3320
3321 return buf;
3322}
3323
3324static void
3325send_flow_exp(struct ofproto *p, struct rule *rule,
3326 long long int now, uint8_t reason)
3327{
3328 struct ofconn *ofconn;
3329 struct ofconn *prev;
b9b0ce61 3330 struct ofpbuf *buf = NULL;
064af421
BP
3331
3332 /* We limit the maximum number of queued flow expirations by accounting
3333 * them under the counter for replies. That works because preventing
3334 * OpenFlow requests from being processed also prevents new flows from
3335 * being added (and expiring). (It also prevents processing OpenFlow
3336 * requests that would not add new flows, so it is imperfect.) */
3337
3338 prev = NULL;
3339 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3340 if (ofconn->send_flow_exp && rconn_is_connected(ofconn->rconn)) {
3341 if (prev) {
431d8ad2 3342 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
064af421
BP
3343 } else {
3344 buf = compose_flow_exp(rule, now, reason);
3345 }
3346 prev = ofconn;
3347 }
3348 }
3349 if (prev) {
431d8ad2 3350 queue_tx(buf, prev, prev->reply_counter);
064af421
BP
3351 }
3352}
3353
3354static void
3355uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
3356{
3357 assert(rule->installed);
3358 assert(!rule->cr.wc.wildcards);
3359
3360 if (rule->super) {
3361 rule_remove(ofproto, rule);
3362 } else {
3363 rule_uninstall(ofproto, rule);
3364 }
3365}
3366
3367static void
3368expire_rule(struct cls_rule *cls_rule, void *p_)
3369{
3370 struct ofproto *p = p_;
3371 struct rule *rule = rule_from_cls_rule(cls_rule);
3372 long long int hard_expire, idle_expire, expire, now;
3373
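/* A rule expires at the earlier of its hard timeout (measured from creation
 * time) and its idle timeout (measured from last use).  The idle timeout
 * applies to a wildcarded super-rule only once it has no remaining subrules,
 * since active subrules are what keep it in use. */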
3374 hard_expire = (rule->hard_timeout
3375 ? rule->created + rule->hard_timeout * 1000
3376 : LLONG_MAX);
3377 idle_expire = (rule->idle_timeout
3378 && (rule->super || list_is_empty(&rule->list))
3379 ? rule->used + rule->idle_timeout * 1000
3380 : LLONG_MAX);
3381 expire = MIN(hard_expire, idle_expire);
064af421
BP
3382
3383 now = time_msec();
3384 if (now < expire) {
3385 if (rule->installed && now >= rule->used + 5000) {
3386 uninstall_idle_flow(p, rule);
0193b2af
JG
3387 } else if (!rule->cr.wc.wildcards) {
3388 active_timeout(p, rule);
064af421 3389 }
0193b2af 3390
064af421
BP
3391 return;
3392 }
3393
3394 COVERAGE_INC(ofproto_expired);
46d6f36f
JG
3395
3396 /* Update stats. This code will be a no-op if the rule expired
3397 * due to an idle timeout. */
064af421 3398 if (rule->cr.wc.wildcards) {
064af421
BP
3399 struct rule *subrule, *next;
3400 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
3401 rule_remove(p, subrule);
3402 }
46d6f36f
JG
3403 } else {
3404 rule_uninstall(p, rule);
064af421
BP
3405 }
3406
8fe1a59d
JG
3407 if (!rule_is_hidden(rule)) {
3408 send_flow_exp(p, rule, now,
3409 (now >= hard_expire
3410 ? OFPER_HARD_TIMEOUT : OFPER_IDLE_TIMEOUT));
3411 }
064af421
BP
3412 rule_remove(p, rule);
3413}
3414
0193b2af
JG
3415static void
3416active_timeout(struct ofproto *ofproto, struct rule *rule)
3417{
3418 if (ofproto->netflow && !is_controller_rule(rule) &&
3419 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
3420 struct ofexpired expired;
3421 struct odp_flow odp_flow;
3422
3423 /* Get updated flow stats. */
3424 memset(&odp_flow, 0, sizeof odp_flow);
094e1514
JG
3425 if (rule->installed) {
3426 odp_flow.key = rule->cr.flow;
3427 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
d65349ea 3428 dpif_flow_get(ofproto->dpif, &odp_flow);
094e1514
JG
3429
3430 if (odp_flow.stats.n_packets) {
3431 update_time(ofproto, rule, &odp_flow.stats);
3432 netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
3433 odp_flow.stats.tcp_flags);
3434 }
0193b2af
JG
3435 }
3436
3437 expired.flow = rule->cr.flow;
3438 expired.packet_count = rule->packet_count +
3439 odp_flow.stats.n_packets;
3440 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
3441 expired.used = rule->used;
3442
3443 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
3444
3445 /* Schedule us to send the accumulated records once we have
3446 * collected all of them. */
3447 poll_immediate_wake();
3448 }
3449}
3450
064af421
BP
3451static void
3452update_used(struct ofproto *p)
3453{
3454 struct odp_flow *flows;
3455 size_t n_flows;
3456 size_t i;
3457 int error;
3458
c228a364 3459 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
064af421
BP
3460 if (error) {
3461 return;
3462 }
3463
3464 for (i = 0; i < n_flows; i++) {
3465 struct odp_flow *f = &flows[i];
3466 struct rule *rule;
3467
3468 rule = rule_from_cls_rule(
3469 classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
3470 if (!rule || !rule->installed) {
3471 COVERAGE_INC(ofproto_unexpected_rule);
c228a364 3472 dpif_flow_del(p->dpif, f);
064af421
BP
3473 continue;
3474 }
3475
0193b2af 3476 update_time(p, rule, &f->stats);
064af421
BP
3477 rule_account(p, rule, f->stats.n_bytes);
3478 }
3479 free(flows);
3480}
3481
3482static void
3483do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id,
3484 const struct ofpbuf *packet, int send_len)
3485{
372179d4
BP
3486 struct odp_msg *msg = packet->data;
3487 struct ofpbuf payload;
3488 struct ofpbuf *opi;
3489 uint8_t reason;
064af421 3490
372179d4 3491 /* Extract packet payload from 'msg'. */
064af421
BP
3492 payload.data = msg + 1;
3493 payload.size = msg->length - sizeof *msg;
3494
372179d4
BP
3495 /* Construct ofp_packet_in message. */
3496 reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
3497 opi = make_packet_in(buffer_id, odp_port_to_ofp_port(msg->port), reason,
3498 &payload, send_len);
3499
3500 /* Send. */
3501 rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100);
064af421
BP
3502}
3503
3504static void
3505send_packet_in_action(struct ofpbuf *packet, void *p_)
3506{
3507 struct ofproto *p = p_;
3508 struct ofconn *ofconn;
3509 struct odp_msg *msg;
3510
3511 msg = packet->data;
3512 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3513 if (ofconn == p->controller || ofconn->miss_send_len) {
3514 do_send_packet_in(ofconn, UINT32_MAX, packet, msg->arg);
3515 }
3516 }
3517 ofpbuf_delete(packet);
3518}
3519
3520static void
3521send_packet_in_miss(struct ofpbuf *packet, void *p_)
3522{
3523 struct ofproto *p = p_;
7778bd15 3524 bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open);
064af421
BP
3525 struct ofconn *ofconn;
3526 struct ofpbuf payload;
3527 struct odp_msg *msg;
3528
3529 msg = packet->data;
3530 payload.data = msg + 1;
3531 payload.size = msg->length - sizeof *msg;
3532 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
3533 if (ofconn->miss_send_len) {
7778bd15
BP
3534 struct pktbuf *pb = ofconn->pktbuf;
3535 uint32_t buffer_id = (in_fail_open
3536 ? pktbuf_get_null()
3537 : pktbuf_save(pb, &payload, msg->port));
064af421
BP
3538 int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len
3539 : UINT32_MAX);
3540 do_send_packet_in(ofconn, buffer_id, packet, send_len);
3541 }
3542 }
3543 ofpbuf_delete(packet);
3544}
3545
3546static uint64_t
fa60c019 3547pick_datapath_id(const struct ofproto *ofproto)
064af421 3548{
fa60c019 3549 const struct ofport *port;
064af421 3550
fa60c019
BP
3551 port = port_array_get(&ofproto->ports, ODPP_LOCAL);
3552 if (port) {
3553 uint8_t ea[ETH_ADDR_LEN];
3554 int error;
3555
3556 error = netdev_get_etheraddr(port->netdev, ea);
064af421
BP
3557 if (!error) {
3558 return eth_addr_to_uint64(ea);
3559 }
3560 VLOG_WARN("could not get MAC address for %s (%s)",
fa60c019 3561 netdev_get_name(port->netdev), strerror(error));
064af421 3562 }
fa60c019 3563 return ofproto->fallback_dpid;
064af421
BP
3564}
3565
3566static uint64_t
3567pick_fallback_dpid(void)
3568{
3569 uint8_t ea[ETH_ADDR_LEN];
3570 eth_addr_random(ea);
3571 ea[0] = 0x00; /* Set Nicira OUI. */
3572 ea[1] = 0x23;
3573 ea[2] = 0x20;
3574 return eth_addr_to_uint64(ea);
3575}
3576\f
3577static bool
3578default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
3579 struct odp_actions *actions, tag_type *tags,
6a07af36 3580 uint16_t *nf_output_iface, void *ofproto_)
064af421
BP
3581{
3582 struct ofproto *ofproto = ofproto_;
3583 int out_port;
3584
3585 /* Drop frames for reserved multicast addresses. */
3586 if (eth_addr_is_reserved(flow->dl_dst)) {
3587 return true;
3588 }
3589
3590 /* Learn source MAC (but don't try to learn from revalidation). */
3591 if (packet != NULL) {
3592 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
3593 0, flow->in_port);
3594 if (rev_tag) {
3595 /* The log messages here could actually be useful in debugging,
3596 * so keep the rate limit relatively high. */
3597 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
3598 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
3599 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
3600 ofproto_revalidate(ofproto, rev_tag);
3601 }
3602 }
3603
3604 /* Determine output port. */
3605 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
3606 if (out_port < 0) {
6a07af36 3607 add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
064af421
BP
3608 } else if (out_port != flow->in_port) {
3609 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
6a07af36 3610 *nf_output_iface = out_port;
064af421
BP
3611 } else {
3612 /* Drop. */
3613 }
3614
3615 return true;
3616}
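/* The default OFPP_NORMAL hook above behaves as a plain MAC-learning L2
 * switch: frames to reserved multicast addresses are dropped, the source MAC
 * is learned (except during revalidation, when no packet is available), and
 * the frame is output to the learned port, flooded if the destination is
 * unknown, or dropped if it would go back out its input port. */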
3617
3618static const struct ofhooks default_ofhooks = {
3619 NULL,
3620 default_normal_ofhook_cb,
3621 NULL,
3622 NULL
3623};