/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "ofproto.h"
#include <errno.h>
#include <inttypes.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdlib.h>
#include "classifier.h"
#include "coverage.h"
#include "discovery.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "in-band.h"
#include "mac-learning.h"
#include "netdev.h"
#include "netflow.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "packets.h"
#include "pinsched.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "port-array.h"
#include "rconn.h"
#include "shash.h"
#include "status.h"
#include "stp.h"
#include "stream-ssl.h"
#include "svec.h"
#include "tag.h"
#include "timeval.h"
#include "unixctl.h"
#include "vconn.h"
#include "xtoxll.h"

#define THIS_MODULE VLM_ofproto
#include "vlog.h"

#include "sflow_api.h"

enum {
    TABLEID_HASH = 0,
    TABLEID_CLASSIFIER = 1
};

struct ofport {
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

static int xlate_actions(const union ofp_action *in, size_t n_in,
                         const flow_t *flow, struct ofproto *ofproto,
                         const struct ofpbuf *packet,
                         struct odp_actions *out, tag_type *tags,
                         bool *may_set_up_flow, uint16_t *nf_output_iface);

struct rule {
    struct cls_rule cr;

    uint64_t flow_cookie;       /* Controller-issued identifier.
                                   (Kept in network-byte order.) */
    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    bool send_flow_removed;     /* Send a flow removed message? */
    long long int used;         /* Last-used time (0 if never used). */
    long long int created;      /* Creation time. */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */
    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
    tag_type tags;              /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     * super-rule's list.
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct rule *super;
    struct list list;

    /* OpenFlow actions.
     *
     * 'n_actions' is the number of elements in the 'actions' array.  A single
     * action may take up more than one element's worth of space.
     *
     * A subrule has no actions (it uses the super-rule's actions). */
    int n_actions;
    union ofp_action *actions;

    /* Datapath actions.
     *
     * A super-rule with wildcard fields never has ODP actions (since the
     * datapath only supports exact-match flows). */
    bool installed;             /* Installed in datapath? */
    bool may_install;           /* True ordinarily; false if actions must
                                 * be reassessed for every packet. */
    int n_odp_actions;
    union odp_action *odp_actions;
};

static inline bool
rule_is_hidden(const struct rule *rule)
{
    /* Subrules are merely an implementation detail, so hide them from the
     * controller. */
    if (rule->super != NULL) {
        return true;
    }

    /* Rules with priority higher than UINT16_MAX are set up by ofproto itself
     * (e.g. by in-band control) and are intentionally hidden from the
     * controller. */
    if (rule->cr.priority > UINT16_MAX) {
        return true;
    }

    return false;
}

static struct rule *rule_create(struct ofproto *, struct rule *super,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                uint64_t flow_cookie, bool send_flow_removed);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void rule_insert(struct ofproto *, struct rule *,
                        struct ofpbuf *packet, uint16_t in_port);
static void rule_remove(struct ofproto *, struct rule *);
static bool rule_make_actions(struct ofproto *, struct rule *,
                              const struct ofpbuf *packet);
static void rule_install(struct ofproto *, struct rule *,
                         struct rule *displaced_rule);
static void rule_uninstall(struct ofproto *, struct rule *);
static void rule_post_uninstall(struct ofproto *, struct rule *);
static void send_flow_removed(struct ofproto *p, struct rule *rule,
                              long long int now, uint8_t reason);

/* ofproto supports two kinds of OpenFlow connections:
 *
 *   - "Controller connections": Connections to ordinary OpenFlow controllers.
 *     ofproto maintains persistent connections to these controllers and by
 *     default sends them asynchronous messages such as packet-ins.
 *
 *   - "Transient connections", e.g. from ovs-ofctl.  When these connections
 *     drop, it is the other side's responsibility to reconnect them if
 *     necessary.  ofproto does not send them asynchronous messages by default.
 */
enum ofconn_type {
    OFCONN_CONTROLLER,          /* An OpenFlow controller. */
    OFCONN_TRANSIENT            /* A transient connection. */
};

/* An OpenFlow connection. */
struct ofconn {
    struct ofproto *ofproto;    /* The ofproto that owns this connection. */
    struct list node;           /* In struct ofproto's "all_conns" list. */
    struct rconn *rconn;        /* OpenFlow connection. */
    enum ofconn_type type;      /* Type. */

    /* OFPT_PACKET_IN related data. */
    struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
    struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */
    struct pktbuf *pktbuf;          /* OpenFlow packet buffers. */
    int miss_send_len;              /* Bytes to send of buffered packets. */

    /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
     * requests, and the maximum number before we stop reading OpenFlow
     * requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;

    /* type == OFCONN_CONTROLLER only. */
    enum nx_role role;           /* Role. */
    struct hmap_node hmap_node;  /* In struct ofproto's "controllers" map. */
    struct discovery *discovery; /* Controller discovery object, if enabled. */
    struct status_category *ss;  /* Switch status category. */
    enum ofproto_band band;      /* In-band or out-of-band? */
};

/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's
 * "schedulers" array.  Their values are 0 and 1, and their meanings and values
 * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient.  In
 * case anything ever changes, check their values here. */
#define N_SCHEDULERS 2
BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0);
BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR);
BUILD_ASSERT_DECL(OFPR_ACTION == 1);
BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR);

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *,
                                    enum ofconn_type);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *, struct ofproto *);
static void ofconn_wait(struct ofconn *);
static bool ofconn_receives_async_msgs(const struct ofconn *);

static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

static void send_packet_in(struct ofproto *, struct ofpbuf *odp_msg);
static void do_send_packet_in(struct ofpbuf *odp_msg, void *ofconn);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *mfr_desc;             /* Manufacturer. */
    char *hw_desc;              /* Hardware. */
    char *sw_desc;              /* Software version. */
    char *serial_desc;          /* Serial number. */
    char *dp_desc;              /* Datapath description. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct port_array ports;    /* Index is ODP port nr; ofport->opp.port_no is
                                 * OFP port nr. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct fail_open *fail_open;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* In-band control. */
    struct in_band *in_band;
    long long int next_in_band_update;
    struct sockaddr_in *extra_in_band_remotes;
    size_t n_extra_remotes;

    /* Flow table. */
    struct classifier cls;
    bool need_revalidate;
    long long int next_expiration;
    struct tag_set revalidate_set;
    bool tun_id_from_cookie;

    /* OpenFlow connections. */
    struct hmap controllers;   /* Controller "struct ofconn"s. */
    struct list all_conns;     /* Contains "struct ofconn"s. */
    struct pvconn **listeners;
    size_t n_listeners;
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);

static void update_used(struct ofproto *);
static void update_stats(struct ofproto *, struct rule *,
                         const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);

static void handle_odp_msg(struct ofproto *, struct ofpbuf *);

static void handle_openflow(struct ofconn *, struct ofproto *,
                            struct ofpbuf *);

static void refresh_port_groups(struct ofproto *);

static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

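/* Opens the datapath named 'datapath' (of type 'datapath_type'), arranges to
 * receive miss, action, and sFlow messages from it, and creates a new ofproto
 * to manage it.  On success stores the new ofproto in '*ofprotop' and returns
 * 0; on failure returns a positive errno value and leaves '*ofprotop' null. */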
int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct odp_stats stats;
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_get_dp_stats(dpif, &stats);
    if (error) {
        VLOG_ERR("failed to obtain stats for datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
    p->hw_desc = xstrdup(DEFAULT_HW_DESC);
    p->sw_desc = xstrdup(DEFAULT_SW_DESC);
    p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
    p->dp_desc = xstrdup(DEFAULT_DP_DESC);

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    port_array_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = stats.max_ports;

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);
    p->in_band = NULL;
    p->fail_open = NULL;
    p->netflow = NULL;
    p->sflow = NULL;

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->need_revalidate = false;
    p->next_expiration = time_msec() + 1000;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    hmap_init(&p->controllers);
    p->listeners = NULL;
    p->n_listeners = 0;
    p->snoops = NULL;
    p->n_snoops = 0;

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);

    *ofprotop = p;
    return 0;
}

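/* Sets 'p''s datapath ID to 'datapath_id', or to an automatically chosen ID
 * if 'datapath_id' is zero.  If the ID actually changes, forces every active
 * OpenFlow connection to reconnect. */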
void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        struct ofconn *ofconn;

        VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);

        /* Force all active connections to reconnect, since there is no way to
         * notify a controller that the datapath ID has changed. */
        LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
            rconn_reconnect(ofconn->rconn);
        }
    }
}

static bool
is_discovery_controller(const struct ofproto_controller *c)
{
    return !strcmp(c->target, "discover");
}

static bool
is_in_band_controller(const struct ofproto_controller *c)
{
    return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND;
}

/* Creates a new controller in 'ofproto'.  Some of the settings are initially
 * drawn from 'c', but update_controller() needs to be called later to finish
 * the new ofconn's configuration. */
static void
add_controller(struct ofproto *ofproto, const struct ofproto_controller *c)
{
    struct discovery *discovery;
    struct ofconn *ofconn;

    if (is_discovery_controller(c)) {
        int error = discovery_create(c->accept_re, c->update_resolv_conf,
                                     ofproto->dpif, ofproto->switch_status,
                                     &discovery);
        if (error) {
            return;
        }
    } else {
        discovery = NULL;
    }

    ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_CONTROLLER);
    ofconn->pktbuf = pktbuf_create();
    ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
    if (discovery) {
        ofconn->discovery = discovery;
    } else {
        rconn_connect(ofconn->rconn, c->target);
    }
    hmap_insert(&ofproto->controllers, &ofconn->hmap_node,
                hash_string(c->target, 0));
}

/* Reconfigures 'ofconn' to match 'c'.  This function cannot update an ofconn's
 * target or turn discovery on or off (these are done by creating new ofconns
 * and deleting old ones), but it can update the rest of an ofconn's
 * settings. */
static void
update_controller(struct ofconn *ofconn, const struct ofproto_controller *c)
{
    struct ofproto *ofproto = ofconn->ofproto;
    int probe_interval;
    int i;

    ofconn->band = (is_in_band_controller(c)
                    ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND);

    rconn_set_max_backoff(ofconn->rconn, c->max_backoff);

    probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
    rconn_set_probe_interval(ofconn->rconn, probe_interval);

    if (ofconn->discovery) {
        discovery_set_update_resolv_conf(ofconn->discovery,
                                         c->update_resolv_conf);
        discovery_set_accept_controller_re(ofconn->discovery, c->accept_re);
    }

    for (i = 0; i < N_SCHEDULERS; i++) {
        struct pinsched **s = &ofconn->schedulers[i];

        if (c->rate_limit > 0) {
            if (!*s) {
                *s = pinsched_create(c->rate_limit, c->burst_limit,
                                     ofproto->switch_status);
            } else {
                pinsched_set_limits(*s, c->rate_limit, c->burst_limit);
            }
        } else {
            pinsched_destroy(*s);
            *s = NULL;
        }
    }
}

static const char *
ofconn_get_target(const struct ofconn *ofconn)
{
    return ofconn->discovery ? "discover" : rconn_get_name(ofconn->rconn);
}

static struct ofconn *
find_controller_by_target(struct ofproto *ofproto, const char *target)
{
    struct ofconn *ofconn;

    HMAP_FOR_EACH_WITH_HASH (ofconn, struct ofconn, hmap_node,
                             hash_string(target, 0), &ofproto->controllers) {
        if (!strcmp(ofconn_get_target(ofconn), target)) {
            return ofconn;
        }
    }
    return NULL;
}

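/* Recomputes the set of remote addresses that in-band control must allow,
 * from the in-band controller connections plus any extra configured remotes,
 * and creates, reconfigures, or destroys the in-band module to match. */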
static void
update_in_band_remotes(struct ofproto *ofproto)
{
    const struct ofconn *ofconn;
    struct sockaddr_in *addrs;
    size_t max_addrs, n_addrs;
    bool discovery;
    size_t i;

    /* Allocate enough memory for as many remotes as we could possibly have. */
    max_addrs = ofproto->n_extra_remotes + hmap_count(&ofproto->controllers);
    addrs = xmalloc(max_addrs * sizeof *addrs);
    n_addrs = 0;

    /* Add all the remotes. */
    discovery = false;
    HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &ofproto->controllers) {
        struct sockaddr_in *sin = &addrs[n_addrs];

        if (ofconn->band == OFPROTO_OUT_OF_BAND) {
            continue;
        }

        sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn);
        if (sin->sin_addr.s_addr) {
            sin->sin_port = rconn_get_remote_port(ofconn->rconn);
            n_addrs++;
        }
        if (ofconn->discovery) {
            discovery = true;
        }
    }
    for (i = 0; i < ofproto->n_extra_remotes; i++) {
        addrs[n_addrs++] = ofproto->extra_in_band_remotes[i];
    }

    /* Create or update or destroy in-band.
     *
     * Ordinarily we only enable in-band if there's at least one remote
     * address, but discovery needs the in-band rules for DHCP to be installed
     * even before we know any remote addresses. */
    if (n_addrs || discovery) {
        if (!ofproto->in_band) {
            in_band_create(ofproto, ofproto->dpif, ofproto->switch_status,
                           &ofproto->in_band);
        }
        if (ofproto->in_band) {
            in_band_set_remotes(ofproto->in_band, addrs, n_addrs);
        }
        ofproto->next_in_band_update = time_msec() + 1000;
    } else {
        in_band_destroy(ofproto->in_band);
        ofproto->in_band = NULL;
    }

    /* Clean up. */
    free(addrs);
}

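/* Reconfigures 'p' to connect to the 'n_controllers' controllers in
 * 'controllers': adds ofconns for new targets, destroys ofconns whose targets
 * are no longer wanted, updates the remaining ones, and adjusts in-band
 * control, fail-open, and switch-status registration to match. */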
void
ofproto_set_controllers(struct ofproto *p,
                        const struct ofproto_controller *controllers,
                        size_t n_controllers)
{
    struct shash new_controllers;
    enum ofproto_fail_mode fail_mode;
    struct ofconn *ofconn, *next;
    bool ss_exists;
    size_t i;

    shash_init(&new_controllers);
    for (i = 0; i < n_controllers; i++) {
        const struct ofproto_controller *c = &controllers[i];

        shash_add_once(&new_controllers, c->target, &controllers[i]);
        if (!find_controller_by_target(p, c->target)) {
            add_controller(p, c);
        }
    }

    fail_mode = OFPROTO_FAIL_STANDALONE;
    ss_exists = false;
    HMAP_FOR_EACH_SAFE (ofconn, next, struct ofconn, hmap_node,
                        &p->controllers) {
        struct ofproto_controller *c;

        c = shash_find_data(&new_controllers, ofconn_get_target(ofconn));
        if (!c) {
            ofconn_destroy(ofconn);
        } else {
            update_controller(ofconn, c);
            if (ofconn->ss) {
                ss_exists = true;
            }
            if (c->fail == OFPROTO_FAIL_SECURE) {
                fail_mode = OFPROTO_FAIL_SECURE;
            }
        }
    }
    shash_destroy(&new_controllers);

    update_in_band_remotes(p);

    if (!hmap_is_empty(&p->controllers)
        && fail_mode == OFPROTO_FAIL_STANDALONE) {
        struct rconn **rconns;
        size_t n;

        if (!p->fail_open) {
            p->fail_open = fail_open_create(p, p->switch_status);
        }

        n = 0;
        rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns);
        HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &p->controllers) {
            rconns[n++] = ofconn->rconn;
        }

        fail_open_set_controllers(p->fail_open, rconns, n);
        /* p->fail_open takes ownership of 'rconns'. */
    } else {
        fail_open_destroy(p->fail_open);
        p->fail_open = NULL;
    }

    if (!hmap_is_empty(&p->controllers) && !ss_exists) {
        ofconn = CONTAINER_OF(hmap_first(&p->controllers),
                              struct ofconn, hmap_node);
        ofconn->ss = switch_status_register(p->switch_status, "remote",
                                            rconn_status_cb, ofconn->rconn);
    }
}

static bool
any_extras_changed(const struct ofproto *ofproto,
                   const struct sockaddr_in *extras, size_t n)
{
    size_t i;

    if (n != ofproto->n_extra_remotes) {
        return true;
    }

    for (i = 0; i < n; i++) {
        const struct sockaddr_in *old = &ofproto->extra_in_band_remotes[i];
        const struct sockaddr_in *new = &extras[i];

        if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
            old->sin_port != new->sin_port) {
            return true;
        }
    }

    return false;
}

/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
 * in-band control should guarantee access, in the same way that in-band
 * control guarantees access to OpenFlow controllers. */
void
ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
                                  const struct sockaddr_in *extras, size_t n)
{
    if (!any_extras_changed(ofproto, extras, n)) {
        return;
    }

    free(ofproto->extra_in_band_remotes);
    ofproto->n_extra_remotes = n;
    ofproto->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);

    update_in_band_remotes(ofproto);
}

void
ofproto_set_desc(struct ofproto *p,
                 const char *mfr_desc, const char *hw_desc,
                 const char *sw_desc, const char *serial_desc,
                 const char *dp_desc)
{
    struct ofp_desc_stats *ods;

    if (mfr_desc) {
        if (strlen(mfr_desc) >= sizeof ods->mfr_desc) {
            VLOG_WARN("truncating mfr_desc, must be less than %zu characters",
                      sizeof ods->mfr_desc);
        }
        free(p->mfr_desc);
        p->mfr_desc = xstrdup(mfr_desc);
    }
    if (hw_desc) {
        if (strlen(hw_desc) >= sizeof ods->hw_desc) {
            VLOG_WARN("truncating hw_desc, must be less than %zu characters",
                      sizeof ods->hw_desc);
        }
        free(p->hw_desc);
        p->hw_desc = xstrdup(hw_desc);
    }
    if (sw_desc) {
        if (strlen(sw_desc) >= sizeof ods->sw_desc) {
            VLOG_WARN("truncating sw_desc, must be less than %zu characters",
                      sizeof ods->sw_desc);
        }
        free(p->sw_desc);
        p->sw_desc = xstrdup(sw_desc);
    }
    if (serial_desc) {
        if (strlen(serial_desc) >= sizeof ods->serial_num) {
            VLOG_WARN("truncating serial_desc, must be less than %zu "
                      "characters",
                      sizeof ods->serial_num);
        }
        free(p->serial_desc);
        p->serial_desc = xstrdup(serial_desc);
    }
    if (dp_desc) {
        if (strlen(dp_desc) >= sizeof ods->dp_desc) {
            VLOG_WARN("truncating dp_desc, must be less than %zu characters",
                      sizeof ods->dp_desc);
        }
        free(p->dp_desc);
        p->dp_desc = xstrdup(dp_desc);
    }
}

BP
755static int
756set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
757 const struct svec *svec)
758{
759 struct pvconn **pvconns = *pvconnsp;
760 size_t n_pvconns = *n_pvconnsp;
761 int retval = 0;
762 size_t i;
763
764 for (i = 0; i < n_pvconns; i++) {
765 pvconn_close(pvconns[i]);
766 }
767 free(pvconns);
768
769 pvconns = xmalloc(svec->n * sizeof *pvconns);
770 n_pvconns = 0;
771 for (i = 0; i < svec->n; i++) {
772 const char *name = svec->names[i];
773 struct pvconn *pvconn;
774 int error;
775
776 error = pvconn_open(name, &pvconn);
777 if (!error) {
778 pvconns[n_pvconns++] = pvconn;
779 } else {
780 VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
781 if (!retval) {
782 retval = error;
783 }
784 }
785 }
786
787 *pvconnsp = pvconns;
788 *n_pvconnsp = n_pvconns;
789
790 return retval;
791}
792
793int
794ofproto_set_listeners(struct ofproto *ofproto, const struct svec *listeners)
795{
796 return set_pvconns(&ofproto->listeners, &ofproto->n_listeners, listeners);
797}
798
799int
800ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
801{
802 return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
803}
804
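/* Configures NetFlow export on 'ofproto' according to 'nf_options', or
 * disables it if 'nf_options' is null or specifies no collectors. */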
int
ofproto_set_netflow(struct ofproto *ofproto,
                    const struct netflow_options *nf_options)
{
    if (nf_options && nf_options->collectors.n) {
        if (!ofproto->netflow) {
            ofproto->netflow = netflow_create();
        }
        return netflow_set_options(ofproto->netflow, nf_options);
    } else {
        netflow_destroy(ofproto->netflow);
        ofproto->netflow = NULL;
        return 0;
    }
}

void
ofproto_set_sflow(struct ofproto *ofproto,
                  const struct ofproto_sflow_options *oso)
{
    struct ofproto_sflow *os = ofproto->sflow;
    if (oso) {
        if (!os) {
            struct ofport *ofport;
            unsigned int odp_port;

            os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
            refresh_port_groups(ofproto);
            PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
                ofproto_sflow_add_port(os, odp_port,
                                       netdev_get_name(ofport->netdev));
            }
        }
        ofproto_sflow_set_options(os, oso);
    } else {
        ofproto_sflow_destroy(os);
        ofproto->sflow = NULL;
    }
}

int
ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
    /* XXX */
    if (enable_stp) {
        VLOG_WARN("STP is not yet implemented");
        return EINVAL;
    } else {
        return 0;
    }
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

bool
ofproto_has_controller(const struct ofproto *ofproto)
{
    return !hmap_is_empty(&ofproto->controllers);
}

void
ofproto_get_listeners(const struct ofproto *ofproto, struct svec *listeners)
{
    size_t i;

    for (i = 0; i < ofproto->n_listeners; i++) {
        svec_add(listeners, pvconn_get_name(ofproto->listeners[i]));
    }
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
{
    size_t i;

    for (i = 0; i < ofproto->n_snoops; i++) {
        svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
    }
}

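/* Destroys 'p' and all of the resources it owns: connections, ports, flows,
 * submodules, and the underlying dpif handle.  A null 'p' is a no-op. */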
void
ofproto_destroy(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    struct ofport *ofport;
    unsigned int port_no;
    size_t i;

    if (!p) {
        return;
    }

    /* Destroy fail-open and in-band early, since they touch the classifier. */
    fail_open_destroy(p->fail_open);
    p->fail_open = NULL;

    in_band_destroy(p->in_band);
    p->in_band = NULL;
    free(p->extra_in_band_remotes);

    ofproto_flush_flows(p);
    classifier_destroy(&p->cls);

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_destroy(ofconn);
    }
    hmap_destroy(&p->controllers);

    dpif_close(p->dpif);
    netdev_monitor_destroy(p->netdev_monitor);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        ofport_free(ofport);
    }
    shash_destroy(&p->port_by_name);

    switch_status_destroy(p->switch_status);
    netflow_destroy(p->netflow);
    ofproto_sflow_destroy(p->sflow);

    for (i = 0; i < p->n_listeners; i++) {
        pvconn_close(p->listeners[i]);
    }
    free(p->listeners);

    for (i = 0; i < p->n_snoops; i++) {
        pvconn_close(p->snoops[i]);
    }
    free(p->snoops);

    mac_learning_destroy(p->ml);

    free(p->mfr_desc);
    free(p->hw_desc);
    free(p->sw_desc);
    free(p->serial_desc);
    free(p->dp_desc);

    port_array_destroy(&p->ports);

    free(p);
}

int
ofproto_run(struct ofproto *p)
{
    int error = ofproto_run1(p);
    if (!error) {
        error = ofproto_run2(p, false);
    }
    return error;
}

static void
process_port_change(struct ofproto *ofproto, int error, char *devname)
{
    if (error == ENOBUFS) {
        reinit_ports(ofproto);
    } else if (!error) {
        update_port(ofproto, devname);
        free(devname);
    }
}

/* Returns a "preference level" for snooping 'ofconn'.  A higher return value
 * means that 'ofconn' is more interesting for monitoring than a lower return
 * value. */
static int
snoop_preference(const struct ofconn *ofconn)
{
    switch (ofconn->role) {
    case NX_ROLE_MASTER:
        return 3;
    case NX_ROLE_OTHER:
        return 2;
    case NX_ROLE_SLAVE:
        return 1;
    default:
        /* Shouldn't happen. */
        return 0;
    }
}

/* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'.
 * Connects this vconn to a controller. */
static void
add_snooper(struct ofproto *ofproto, struct vconn *vconn)
{
    struct ofconn *ofconn, *best;

    /* Pick a controller for monitoring. */
    best = NULL;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
        if (ofconn->type == OFCONN_CONTROLLER
            && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
            best = ofconn;
        }
    }

    if (best) {
        rconn_add_monitor(best->rconn, vconn);
    } else {
        VLOG_INFO_RL(&rl, "no controller connection to snoop");
        vconn_close(vconn);
    }
}

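/* Performs one round of periodic processing: dispatches datapath upcalls,
 * tracks port changes, and runs in-band control, the OpenFlow connections,
 * fail-open, listeners and snoops, flow expiration, NetFlow, and sFlow. */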
int
ofproto_run1(struct ofproto *p)
{
    struct ofconn *ofconn, *next_ofconn;
    char *devname;
    int error;
    int i;

    if (shash_is_empty(&p->port_by_name)) {
        init_ports(p);
    }

    for (i = 0; i < 50; i++) {
        struct ofpbuf *buf;
        int error;

        error = dpif_recv(p->dpif, &buf);
        if (error) {
            if (error == ENODEV) {
                /* Someone destroyed the datapath behind our back.  The caller
                 * better destroy us and give up, because we're just going to
                 * spin from here on out. */
                static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
                VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
                            dpif_name(p->dpif));
                return ENODEV;
            }
            break;
        }

        handle_odp_msg(p, buf);
    }

    while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }
    while ((error = netdev_monitor_poll(p->netdev_monitor,
                                        &devname)) != EAGAIN) {
        process_port_change(p, error, devname);
    }

    if (p->in_band) {
        if (time_msec() >= p->next_in_band_update) {
            update_in_band_remotes(p);
        }
        in_band_run(p->in_band);
    }

    LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
                        &p->all_conns) {
        ofconn_run(ofconn, p);
    }

    /* Fail-open maintenance.  Do this after processing the ofconns since
     * fail-open checks the status of the controller rconn. */
    if (p->fail_open) {
        fail_open_run(p->fail_open);
    }

    for (i = 0; i < p->n_listeners; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn);
        if (!retval) {
            ofconn_create(p, rconn_new_from_vconn("passive", vconn),
                          OFCONN_TRANSIENT);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    for (i = 0; i < p->n_snoops; i++) {
        struct vconn *vconn;
        int retval;

        retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
        if (!retval) {
            add_snooper(p, vconn);
        } else if (retval != EAGAIN) {
            VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
        }
    }

    if (time_msec() >= p->next_expiration) {
        COVERAGE_INC(ofproto_expiration);
        p->next_expiration = time_msec() + 1000;
        update_used(p);

        classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);

        /* Let the hook know that we're at a stable point: all outstanding data
         * in existing flows has been accounted to the account_cb.  Thus, the
         * hook can now reasonably do operations that depend on having accurate
         * flow volume accounting (currently, that's just bond rebalancing). */
        if (p->ofhooks->account_checkpoint_cb) {
            p->ofhooks->account_checkpoint_cb(p->aux);
        }
    }

    if (p->netflow) {
        netflow_run(p->netflow);
    }
    if (p->sflow) {
        ofproto_sflow_run(p->sflow);
    }

    return 0;
}

struct revalidate_cbdata {
    struct ofproto *ofproto;
    bool revalidate_all;        /* Revalidate all exact-match rules? */
    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */
};

int
ofproto_run2(struct ofproto *p, bool revalidate_all)
{
    if (p->need_revalidate || revalidate_all
        || !tag_set_is_empty(&p->revalidate_set)) {
        struct revalidate_cbdata cbdata;
        cbdata.ofproto = p;
        cbdata.revalidate_all = revalidate_all;
        cbdata.revalidate_subrules = p->need_revalidate;
        cbdata.revalidate_set = p->revalidate_set;
        tag_set_init(&p->revalidate_set);
        COVERAGE_INC(ofproto_revalidate);
        classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        p->need_revalidate = false;
    }

    return 0;
}

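/* Arranges for poll_block() to wake up when ofproto_run() has work to do. */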
void
ofproto_wait(struct ofproto *p)
{
    struct ofconn *ofconn;
    size_t i;

    dpif_recv_wait(p->dpif);
    dpif_port_poll_wait(p->dpif);
    netdev_monitor_poll_wait(p->netdev_monitor);
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        ofconn_wait(ofconn);
    }
    if (p->in_band) {
        poll_timer_wait_until(p->next_in_band_update);
        in_band_wait(p->in_band);
    }
    if (p->fail_open) {
        fail_open_wait(p->fail_open);
    }
    if (p->sflow) {
        ofproto_sflow_wait(p->sflow);
    }
    if (!tag_set_is_empty(&p->revalidate_set)) {
        poll_immediate_wake();
    }
    if (p->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    } else if (p->next_expiration != LLONG_MAX) {
        poll_timer_wait_until(p->next_expiration);
    }
    for (i = 0; i < p->n_listeners; i++) {
        pvconn_wait(p->listeners[i]);
    }
    for (i = 0; i < p->n_snoops; i++) {
        pvconn_wait(p->snoops[i]);
    }
}

void
ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
{
    tag_set_add(&ofproto->revalidate_set, tag);
}

struct tag_set *
ofproto_get_revalidate_set(struct ofproto *ofproto)
{
    return &ofproto->revalidate_set;
}

bool
ofproto_is_alive(const struct ofproto *p)
{
    return !hmap_is_empty(&p->controllers);
}

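/* Translates the OpenFlow 'actions' into ODP actions for 'flow' and executes
 * them on 'packet'.  Returns 0 on success, otherwise an OpenFlow error code
 * from action translation (errors from the datapath execution itself are
 * ignored). */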
int
ofproto_send_packet(struct ofproto *p, const flow_t *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct odp_actions odp_actions;
    int error;

    error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
                          NULL, NULL, NULL);
    if (error) {
        return error;
    }

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, flow->in_port, odp_actions.actions,
                 odp_actions.n_actions, packet);
    return 0;
}

void
ofproto_add_flow(struct ofproto *p,
                 const flow_t *flow, uint32_t wildcards, unsigned int priority,
                 const union ofp_action *actions, size_t n_actions,
                 int idle_timeout)
{
    struct rule *rule;
    rule = rule_create(p, NULL, actions, n_actions,
                       idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
                       0, 0, false);
    cls_rule_from_flow(flow, wildcards, priority, &rule->cr);
    rule_insert(p, rule, NULL, 0);
}

void
ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
                    uint32_t wildcards, unsigned int priority)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           flow, wildcards,
                                                           priority));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

static void
destroy_rule(struct cls_rule *rule_, void *ofproto_)
{
    struct rule *rule = rule_from_cls_rule(rule_);
    struct ofproto *ofproto = ofproto_;

    /* Mark the flow as not installed, even though it might really be
     * installed, so that rule_remove() doesn't bother trying to uninstall it.
     * There is no point in uninstalling it individually since we are about to
     * blow away all the flows with dpif_flow_flush(). */
    rule->installed = false;

    rule_remove(ofproto, rule);
}

void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
    dpif_flow_flush(ofproto->dpif);
    if (ofproto->in_band) {
        in_band_flushed(ofproto->in_band);
    }
    if (ofproto->fail_open) {
        fail_open_flushed(ofproto->fail_open);
    }
}

static void
reinit_ports(struct ofproto *p)
{
    struct svec devnames;
    struct ofport *ofport;
    unsigned int port_no;
    struct odp_port *odp_ports;
    size_t n_odp_ports;
    size_t i;

    svec_init(&devnames);
    PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
        svec_add (&devnames, (char *) ofport->opp.name);
    }
    dpif_port_list(p->dpif, &odp_ports, &n_odp_ports);
    for (i = 0; i < n_odp_ports; i++) {
        svec_add (&devnames, odp_ports[i].devname);
    }
    free(odp_ports);

    svec_sort_unique(&devnames);
    for (i = 0; i < devnames.n; i++) {
        update_port(p, devnames.names[i]);
    }
    svec_destroy(&devnames);
}

static size_t
refresh_port_group(struct ofproto *p, unsigned int group)
{
    uint16_t *ports;
    size_t n_ports;
    struct ofport *port;
    unsigned int port_no;

    assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);

    ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
    n_ports = 0;
    PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
        if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
            ports[n_ports++] = port_no;
        }
    }
    dpif_port_group_set(p->dpif, group, ports, n_ports);
    free(ports);

    return n_ports;
}

static void
refresh_port_groups(struct ofproto *p)
{
    size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
    size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
    if (p->sflow) {
        ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
    }
}

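/* Opens the network device for 'odp_port' and returns a new ofport that
 * describes it, or a null pointer if the device cannot be opened. */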
static struct ofport *
make_ofport(const struct odp_port *odp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct ofport *ofport;
    struct netdev *netdev;
    bool carrier;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = odp_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     odp_port->devname, odp_port->port,
                     odp_port->devname, strerror(error));
        return NULL;
    }

    ofport = xmalloc(sizeof *ofport);
    ofport->netdev = netdev;
    ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port);
    netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
    memcpy(ofport->opp.name, odp_port->devname,
           MIN(sizeof ofport->opp.name, sizeof odp_port->devname));
    ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &ofport->opp.curr, &ofport->opp.advertised,
                        &ofport->opp.supported, &ofport->opp.peer);
    return ofport;
}

static bool
ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port)
{
    if (port_array_get(&p->ports, odp_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     odp_port->port);
        return true;
    } else if (shash_find(&p->port_by_name, odp_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     odp_port->devname);
        return true;
    } else {
        return false;
    }
}

static int
ofport_equal(const struct ofport *a_, const struct ofport *b_)
{
    const struct ofp_phy_port *a = &a_->opp;
    const struct ofp_phy_port *b = &b_->opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}

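/* Sends an OFPT_PORT_STATUS message with the given 'reason' and 'ofport' to
 * every OpenFlow connection that accepts asynchronous messages, and invokes
 * the port_changed_cb hook, if any. */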
static void
send_port_status(struct ofproto *p, const struct ofport *ofport,
                 uint8_t reason)
{
    /* XXX Should limit the number of queued port status change messages. */
    struct ofconn *ofconn;
    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        struct ofp_port_status *ops;
        struct ofpbuf *b;

        if (!ofconn_receives_async_msgs(ofconn)) {
            continue;
        }

        ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
        ops->reason = reason;
        ops->desc = ofport->opp;
        hton_ofp_phy_port(&ops->desc);
        queue_tx(b, ofconn, NULL);
    }
    if (p->ofhooks->port_changed_cb) {
        p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
    }
}

static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
    const char *netdev_name = (const char *) ofport->opp.name;

    netdev_monitor_add(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, ofport);
    shash_add(&p->port_by_name, netdev_name, ofport);
    if (p->sflow) {
        ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
    }
}

static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
    uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);

    netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
    port_array_set(&p->ports, odp_port, NULL);
    shash_delete(&p->port_by_name,
                 shash_find(&p->port_by_name, (char *) ofport->opp.name));
    if (p->sflow) {
        ofproto_sflow_del_port(p->sflow, odp_port);
    }
}

static void
ofport_free(struct ofport *ofport)
{
    if (ofport) {
        netdev_close(ofport->netdev);
        free(ofport);
    }
}

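/* Reconciles ofproto's view of the port named 'devname' with the datapath:
 * installs, removes, or replaces the corresponding ofport, notifies
 * controllers with a port-status message, and refreshes the port groups. */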
static void
update_port(struct ofproto *p, const char *devname)
{
    struct odp_port odp_port;
    struct ofport *old_ofport;
    struct ofport *new_ofport;
    int error;

    COVERAGE_INC(ofproto_update_port);

    /* Query the datapath for port information. */
    error = dpif_port_query_by_name(p->dpif, devname, &odp_port);

    /* Find the old ofport. */
    old_ofport = shash_find_data(&p->port_by_name, devname);
    if (!error) {
        if (!old_ofport) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_ofport = port_array_get(&p->ports, odp_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new ofport. */
    new_ofport = !error ? make_ofport(&odp_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_ofport && !new_ofport) {
        return;
    } else if (old_ofport && new_ofport) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_ofport.  (make_ofport() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;

        if (ofport_equal(old_ofport, new_ofport)) {
            /* False alarm--no change. */
            ofport_free(new_ofport);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_ofport) {
        ofport_remove(p, old_ofport);
    }
    if (new_ofport) {
        ofport_install(p, new_ofport);
    }
    send_port_status(p, new_ofport ? new_ofport : old_ofport,
                     (!old_ofport ? OFPPR_ADD
                      : !new_ofport ? OFPPR_DELETE
                      : OFPPR_MODIFY));
    ofport_free(old_ofport);

    /* Update port groups. */
    refresh_port_groups(p);
}

static int
init_ports(struct ofproto *p)
{
    struct odp_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = dpif_port_list(p->dpif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct odp_port *odp_port = &ports[i];
        if (!ofport_conflicts(p, odp_port)) {
            struct ofport *ofport = make_ofport(odp_port);
            if (ofport) {
                ofport_install(p, ofport);
            }
        }
    }
    free(ports);
    refresh_port_groups(p);
    return 0;
}

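/* Creates a new ofconn of the given 'type' wrapping 'rconn' and adds it to
 * 'p''s list of connections. */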
static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type)
{
    struct ofconn *ofconn = xzalloc(sizeof *ofconn);
    ofconn->ofproto = p;
    list_push_back(&p->all_conns, &ofconn->node);
    ofconn->rconn = rconn;
    ofconn->type = type;
    ofconn->role = NX_ROLE_OTHER;
    ofconn->packet_in_counter = rconn_packet_counter_create ();
    ofconn->pktbuf = NULL;
    ofconn->miss_send_len = 0;
    ofconn->reply_counter = rconn_packet_counter_create ();
    return ofconn;
}

static void
ofconn_destroy(struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_CONTROLLER) {
        hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
    }
    discovery_destroy(ofconn->discovery);

    list_remove(&ofconn->node);
    switch_status_unregister(ofconn->ss);
    rconn_destroy(ofconn->rconn);
    rconn_packet_counter_destroy(ofconn->packet_in_counter);
    rconn_packet_counter_destroy(ofconn->reply_counter);
    pktbuf_destroy(ofconn->pktbuf);
    free(ofconn);
}

static void
ofconn_run(struct ofconn *ofconn, struct ofproto *p)
{
    int iteration;
    size_t i;

    if (ofconn->discovery) {
        char *controller_name;
        if (rconn_is_connectivity_questionable(ofconn->rconn)) {
            discovery_question_connectivity(ofconn->discovery);
        }
        if (discovery_run(ofconn->discovery, &controller_name)) {
            if (controller_name) {
                rconn_connect(ofconn->rconn, controller_name);
            } else {
                rconn_disconnect(ofconn->rconn);
            }
        }
    }

    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
    }

    rconn_run(ofconn->rconn);

    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        /* Limit the number of iterations to prevent other tasks from
         * starving. */
        for (iteration = 0; iteration < 50; iteration++) {
            struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
            if (!of_msg) {
                break;
            }
            if (p->fail_open) {
                fail_open_maybe_recover(p->fail_open);
            }
            handle_openflow(ofconn, p, of_msg);
            ofpbuf_delete(of_msg);
        }
    }

    if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) {
        ofconn_destroy(ofconn);
    }
}

static void
ofconn_wait(struct ofconn *ofconn)
{
    int i;

    if (ofconn->discovery) {
        discovery_wait(ofconn->discovery);
    }
    for (i = 0; i < N_SCHEDULERS; i++) {
        pinsched_wait(ofconn->schedulers[i]);
    }
    rconn_run_wait(ofconn->rconn);
    if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
        rconn_recv_wait(ofconn->rconn);
    } else {
        COVERAGE_INC(ofproto_ofconn_stuck);
    }
}

/* Returns true if 'ofconn' should receive asynchronous messages. */
static bool
ofconn_receives_async_msgs(const struct ofconn *ofconn)
{
    if (ofconn->type == OFCONN_CONTROLLER) {
        /* Ordinary controllers always get asynchronous messages unless they
         * have configured themselves as "slaves". */
        return ofconn->role != NX_ROLE_SLAVE;
    } else {
        /* Transient connections don't get asynchronous messages unless they
         * have explicitly asked for them by setting a nonzero miss send
         * length. */
        return ofconn->miss_send_len > 0;
    }
}

1701/* Caller is responsible for initializing the 'cr' member of the returned
1702 * rule. */
1703static struct rule *
0193b2af 1704rule_create(struct ofproto *ofproto, struct rule *super,
064af421 1705 const union ofp_action *actions, size_t n_actions,
ca069229 1706 uint16_t idle_timeout, uint16_t hard_timeout,
39997502 1707 uint64_t flow_cookie, bool send_flow_removed)
064af421 1708{
ec6fde61 1709 struct rule *rule = xzalloc(sizeof *rule);
064af421
BP
1710 rule->idle_timeout = idle_timeout;
1711 rule->hard_timeout = hard_timeout;
39997502 1712 rule->flow_cookie = flow_cookie;
064af421 1713 rule->used = rule->created = time_msec();
ca069229 1714 rule->send_flow_removed = send_flow_removed;
064af421
BP
1715 rule->super = super;
1716 if (super) {
1717 list_push_back(&super->list, &rule->list);
1718 } else {
1719 list_init(&rule->list);
1720 }
1721 rule->n_actions = n_actions;
1722 rule->actions = xmemdup(actions, n_actions * sizeof *actions);
0193b2af
JG
1723 netflow_flow_clear(&rule->nf_flow);
1724 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
1725
064af421
BP
1726 return rule;
1727}
1728
1729static struct rule *
1730rule_from_cls_rule(const struct cls_rule *cls_rule)
1731{
1732 return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
1733}
1734
1735static void
1736rule_free(struct rule *rule)
1737{
1738 free(rule->actions);
1739 free(rule->odp_actions);
1740 free(rule);
1741}
1742
1743/* Destroys 'rule'. If 'rule' is a subrule, also removes it from its
1744 * super-rule's list of subrules. If 'rule' is a super-rule, also iterates
1745 * through all of its subrules and revalidates them, destroying any that no
1746 * longer has a super-rule (which is probably all of them).
1747 *
1748 * Before calling this function, the caller must make have removed 'rule' from
1749 * the classifier. If 'rule' is an exact-match rule, the caller is also
1750 * responsible for ensuring that it has been uninstalled from the datapath. */
1751static void
1752rule_destroy(struct ofproto *ofproto, struct rule *rule)
1753{
1754 if (!rule->super) {
1755 struct rule *subrule, *next;
1756 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
1757 revalidate_rule(ofproto, subrule);
1758 }
1759 } else {
1760 list_remove(&rule->list);
1761 }
1762 rule_free(rule);
1763}
1764
1765static bool
1766rule_has_out_port(const struct rule *rule, uint16_t out_port)
1767{
1768 const union ofp_action *oa;
1769 struct actions_iterator i;
1770
1771 if (out_port == htons(OFPP_NONE)) {
1772 return true;
1773 }
1774 for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
1775 oa = actions_next(&i)) {
1776 if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
1777 return true;
1778 }
1779 }
1780 return false;
1781}
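/* Note that 'out_port' is taken in network byte order here: it is compared
 * against htons(OFPP_NONE) and against the raw, untranslated oa->output.port
 * field.  Callers such as the flow-stats and delete handlers below therefore
 * pass the wire value from the request straight through. */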
1782
1783/* Executes the actions indicated by 'rule' on 'packet', which is in flow
1784 * 'flow' and is considered to have arrived on ODP port 'in_port'.
1785 *
 1786 * The flow that 'packet' actually contains need not match 'rule'; the
 1787 * actions in 'rule' will be applied to it either way. Likewise,
1788 * the packet and byte counters for 'rule' will be credited for the packet sent
1789 * out whether or not the packet actually matches 'rule'.
1790 *
1791 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
1792 * the caller must already have accurately composed ODP actions for it given
1793 * 'packet' using rule_make_actions(). If 'rule' is a wildcard rule, or if
1794 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
1795 * function will compose a set of ODP actions based on 'rule''s OpenFlow
1796 * actions and apply them to 'packet'. */
1797static void
1798rule_execute(struct ofproto *ofproto, struct rule *rule,
1799 struct ofpbuf *packet, const flow_t *flow)
1800{
1801 const union odp_action *actions;
1802 size_t n_actions;
1803 struct odp_actions a;
1804
1805 /* Grab or compose the ODP actions.
1806 *
1807 * The special case for an exact-match 'rule' where 'flow' is not the
1808 * rule's flow is important to avoid, e.g., sending a packet out its input
1809 * port simply because the ODP actions were composed for the wrong
1810 * scenario. */
1811 if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
1812 struct rule *super = rule->super ? rule->super : rule;
1813 if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
6a07af36 1814 packet, &a, NULL, 0, NULL)) {
064af421
BP
1815 return;
1816 }
1817 actions = a.actions;
1818 n_actions = a.n_actions;
1819 } else {
1820 actions = rule->odp_actions;
1821 n_actions = rule->n_odp_actions;
1822 }
1823
1824 /* Execute the ODP actions. */
c228a364 1825 if (!dpif_execute(ofproto->dpif, flow->in_port,
064af421
BP
1826 actions, n_actions, packet)) {
1827 struct odp_flow_stats stats;
1828 flow_extract_stats(flow, packet, &stats);
0193b2af 1829 update_stats(ofproto, rule, &stats);
064af421 1830 rule->used = time_msec();
0193b2af 1831 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
064af421
BP
1832 }
1833}
1834
1835static void
1836rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
1837 uint16_t in_port)
1838{
1839 struct rule *displaced_rule;
1840
1841 /* Insert the rule in the classifier. */
1842 displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
1843 if (!rule->cr.wc.wildcards) {
1844 rule_make_actions(p, rule, packet);
1845 }
1846
1847 /* Send the packet and credit it to the rule. */
1848 if (packet) {
1849 flow_t flow;
659586ef 1850 flow_extract(packet, 0, in_port, &flow);
064af421
BP
1851 rule_execute(p, rule, packet, &flow);
1852 }
1853
1854 /* Install the rule in the datapath only after sending the packet, to
1855 * avoid packet reordering. */
1856 if (rule->cr.wc.wildcards) {
1857 COVERAGE_INC(ofproto_add_wc_flow);
1858 p->need_revalidate = true;
1859 } else {
1860 rule_install(p, rule, displaced_rule);
1861 }
1862
1863 /* Free the rule that was displaced, if any. */
1864 if (displaced_rule) {
1865 rule_destroy(p, displaced_rule);
1866 }
1867}
1868
1869static struct rule *
1870rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
1871 const flow_t *flow)
1872{
0193b2af 1873 struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
ca069229 1874 rule->idle_timeout, rule->hard_timeout,
39997502 1875 0, false);
064af421 1876 COVERAGE_INC(ofproto_subrule_create);
659586ef
JG
1877 cls_rule_from_flow(flow, 0, (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
1878 : rule->cr.priority), &subrule->cr);
064af421
BP
1879 classifier_insert_exact(&ofproto->cls, &subrule->cr);
1880
1881 return subrule;
1882}
1883
1884static void
1885rule_remove(struct ofproto *ofproto, struct rule *rule)
1886{
1887 if (rule->cr.wc.wildcards) {
1888 COVERAGE_INC(ofproto_del_wc_flow);
1889 ofproto->need_revalidate = true;
1890 } else {
1891 rule_uninstall(ofproto, rule);
1892 }
1893 classifier_remove(&ofproto->cls, &rule->cr);
1894 rule_destroy(ofproto, rule);
1895}
1896
1897/* Returns true if the actions changed, false otherwise. */
1898static bool
1899rule_make_actions(struct ofproto *p, struct rule *rule,
1900 const struct ofpbuf *packet)
1901{
1902 const struct rule *super;
1903 struct odp_actions a;
1904 size_t actions_len;
1905
1906 assert(!rule->cr.wc.wildcards);
1907
1908 super = rule->super ? rule->super : rule;
1909 rule->tags = 0;
1910 xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
6a07af36 1911 packet, &a, &rule->tags, &rule->may_install,
0193b2af 1912 &rule->nf_flow.output_iface);
064af421
BP
1913
1914 actions_len = a.n_actions * sizeof *a.actions;
1915 if (rule->n_odp_actions != a.n_actions
1916 || memcmp(rule->odp_actions, a.actions, actions_len)) {
1917 COVERAGE_INC(ofproto_odp_unchanged);
1918 free(rule->odp_actions);
1919 rule->n_odp_actions = a.n_actions;
1920 rule->odp_actions = xmemdup(a.actions, actions_len);
1921 return true;
1922 } else {
1923 return false;
1924 }
1925}
1926
1927static int
1928do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
1929 struct odp_flow_put *put)
1930{
1931 memset(&put->flow.stats, 0, sizeof put->flow.stats);
1932 put->flow.key = rule->cr.flow;
1933 put->flow.actions = rule->odp_actions;
1934 put->flow.n_actions = rule->n_odp_actions;
ab48643b 1935 put->flow.flags = 0;
064af421 1936 put->flags = flags;
c228a364 1937 return dpif_flow_put(ofproto->dpif, put);
064af421
BP
1938}
1939
1940static void
1941rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
1942{
1943 assert(!rule->cr.wc.wildcards);
1944
1945 if (rule->may_install) {
1946 struct odp_flow_put put;
1947 if (!do_put_flow(p, rule,
1948 ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS,
1949 &put)) {
1950 rule->installed = true;
1951 if (displaced_rule) {
14986b31 1952 update_stats(p, displaced_rule, &put.flow.stats);
064af421
BP
1953 rule_post_uninstall(p, displaced_rule);
1954 }
1955 }
1956 } else if (displaced_rule) {
1957 rule_uninstall(p, displaced_rule);
1958 }
1959}
1960
1961static void
1962rule_reinstall(struct ofproto *ofproto, struct rule *rule)
1963{
1964 if (rule->installed) {
1965 struct odp_flow_put put;
1966 COVERAGE_INC(ofproto_dp_missed);
1967 do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
1968 } else {
1969 rule_install(ofproto, rule, NULL);
1970 }
1971}
1972
1973static void
1974rule_update_actions(struct ofproto *ofproto, struct rule *rule)
1975{
42c3641c
JG
1976 bool actions_changed;
1977 uint16_t new_out_iface, old_out_iface;
1978
1979 old_out_iface = rule->nf_flow.output_iface;
1980 actions_changed = rule_make_actions(ofproto, rule, NULL);
1981
064af421
BP
1982 if (rule->may_install) {
1983 if (rule->installed) {
1984 if (actions_changed) {
064af421 1985 struct odp_flow_put put;
42c3641c
JG
1986 do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
1987 | ODPPF_ZERO_STATS, &put);
1988 update_stats(ofproto, rule, &put.flow.stats);
1989
1990 /* Temporarily set the old output iface so that NetFlow
1991 * messages have the correct output interface for the old
1992 * stats. */
1993 new_out_iface = rule->nf_flow.output_iface;
1994 rule->nf_flow.output_iface = old_out_iface;
1995 rule_post_uninstall(ofproto, rule);
1996 rule->nf_flow.output_iface = new_out_iface;
064af421
BP
1997 }
1998 } else {
1999 rule_install(ofproto, rule, NULL);
2000 }
2001 } else {
2002 rule_uninstall(ofproto, rule);
2003 }
2004}
2005
2006static void
2007rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
2008{
2009 uint64_t total_bytes = rule->byte_count + extra_bytes;
2010
2011 if (ofproto->ofhooks->account_flow_cb
2012 && total_bytes > rule->accounted_bytes)
2013 {
2014 ofproto->ofhooks->account_flow_cb(
2015 &rule->cr.flow, rule->odp_actions, rule->n_odp_actions,
2016 total_bytes - rule->accounted_bytes, ofproto->aux);
2017 rule->accounted_bytes = total_bytes;
2018 }
2019}
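/* Worked example for the delta accounting above (hypothetical numbers): with
 * byte_count 1000, accounted_bytes 600, and extra_bytes 0, the hook sees a
 * delta of 400 bytes and accounted_bytes becomes 1000; an immediate second
 * call with unchanged counters is a no-op, so each byte is reported to
 * account_flow_cb at most once. */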
2020
2021static void
2022rule_uninstall(struct ofproto *p, struct rule *rule)
2023{
2024 assert(!rule->cr.wc.wildcards);
2025 if (rule->installed) {
2026 struct odp_flow odp_flow;
2027
2028 odp_flow.key = rule->cr.flow;
2029 odp_flow.actions = NULL;
2030 odp_flow.n_actions = 0;
ab48643b 2031 odp_flow.flags = 0;
c228a364 2032 if (!dpif_flow_del(p->dpif, &odp_flow)) {
0193b2af 2033 update_stats(p, rule, &odp_flow.stats);
064af421
BP
2034 }
2035 rule->installed = false;
2036
2037 rule_post_uninstall(p, rule);
2038 }
2039}
2040
0193b2af
JG
2041static bool
2042is_controller_rule(struct rule *rule)
2043{
 2044 /* If the only action is to send to the controller, then don't report
2045 * NetFlow expiration messages since it is just part of the control
2046 * logic for the network and not real traffic. */
2047
2048 if (rule && rule->super) {
2049 struct rule *super = rule->super;
2050
2051 return super->n_actions == 1 &&
2052 super->actions[0].type == htons(OFPAT_OUTPUT) &&
2053 super->actions[0].output.port == htons(OFPP_CONTROLLER);
2054 }
2055
2056 return false;
2057}
2058
064af421
BP
2059static void
2060rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
2061{
2062 struct rule *super = rule->super;
2063
2064 rule_account(ofproto, rule, 0);
6a07af36 2065
0193b2af 2066 if (ofproto->netflow && !is_controller_rule(rule)) {
064af421
BP
2067 struct ofexpired expired;
2068 expired.flow = rule->cr.flow;
2069 expired.packet_count = rule->packet_count;
2070 expired.byte_count = rule->byte_count;
2071 expired.used = rule->used;
0193b2af 2072 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
064af421
BP
2073 }
2074 if (super) {
2075 super->packet_count += rule->packet_count;
2076 super->byte_count += rule->byte_count;
064af421 2077
0c0afbec
JG
2078 /* Reset counters to prevent double counting if the rule ever gets
2079 * reinstalled. */
2080 rule->packet_count = 0;
2081 rule->byte_count = 0;
2082 rule->accounted_bytes = 0;
0193b2af
JG
2083
2084 netflow_flow_clear(&rule->nf_flow);
0c0afbec 2085 }
064af421
BP
2086}
2087\f
2088static void
2089queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
2090 struct rconn_packet_counter *counter)
2091{
2092 update_openflow_length(msg);
2093 if (rconn_send(ofconn->rconn, msg, counter)) {
2094 ofpbuf_delete(msg);
2095 }
2096}
2097
2098static void
2099send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
2100 int error, const void *data, size_t len)
2101{
2102 struct ofpbuf *buf;
2103 struct ofp_error_msg *oem;
2104
2105 if (!(error >> 16)) {
2106 VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
2107 error);
2108 return;
2109 }
2110
2111 COVERAGE_INC(ofproto_error);
2112 oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
2113 oh ? oh->xid : 0, &buf);
2114 oem->type = htons((unsigned int) error >> 16);
2115 oem->code = htons(error & 0xffff);
2116 memcpy(oem->data, data, len);
2117 queue_tx(buf, ofconn, ofconn->reply_counter);
2118}
2119
2120static void
2121send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
2122 int error)
2123{
2124 size_t oh_length = ntohs(oh->length);
2125 send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
2126}
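/* For illustration: the encoded error values handled above are assumed to
 * pack the OpenFlow error type into the high 16 bits and the error code into
 * the low 16 bits, matching how send_error() splits them:
 *
 *     int err = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
 *     uint16_t type = err >> 16;      (yields OFPET_BAD_REQUEST)
 *     uint16_t code = err & 0xffff;   (yields OFPBRC_BAD_LEN)
 *
 * A value whose high half is zero is treated as malformed, so send_error()
 * logs it and drops it instead of forwarding it to the controller. */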
2127
2128static void
2129hton_ofp_phy_port(struct ofp_phy_port *opp)
2130{
2131 opp->port_no = htons(opp->port_no);
2132 opp->config = htonl(opp->config);
2133 opp->state = htonl(opp->state);
2134 opp->curr = htonl(opp->curr);
2135 opp->advertised = htonl(opp->advertised);
2136 opp->supported = htonl(opp->supported);
2137 opp->peer = htonl(opp->peer);
2138}
2139
2140static int
2141handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
2142{
2143 struct ofp_header *rq = oh;
2144 queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
2145 return 0;
2146}
2147
2148static int
2149handle_features_request(struct ofproto *p, struct ofconn *ofconn,
2150 struct ofp_header *oh)
2151{
2152 struct ofp_switch_features *osf;
2153 struct ofpbuf *buf;
2154 unsigned int port_no;
2155 struct ofport *port;
2156
2157 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
2158 osf->datapath_id = htonll(p->datapath_id);
2159 osf->n_buffers = htonl(pktbuf_capacity());
2160 osf->n_tables = 2;
2161 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
0254ae23 2162 OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
064af421
BP
2163 osf->actions = htonl((1u << OFPAT_OUTPUT) |
2164 (1u << OFPAT_SET_VLAN_VID) |
2165 (1u << OFPAT_SET_VLAN_PCP) |
2166 (1u << OFPAT_STRIP_VLAN) |
2167 (1u << OFPAT_SET_DL_SRC) |
2168 (1u << OFPAT_SET_DL_DST) |
2169 (1u << OFPAT_SET_NW_SRC) |
2170 (1u << OFPAT_SET_NW_DST) |
959a2ecd 2171 (1u << OFPAT_SET_NW_TOS) |
064af421
BP
2172 (1u << OFPAT_SET_TP_SRC) |
2173 (1u << OFPAT_SET_TP_DST));
2174
2175 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
2176 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
2177 }
2178
2179 queue_tx(buf, ofconn, ofconn->reply_counter);
2180 return 0;
2181}
2182
2183static int
2184handle_get_config_request(struct ofproto *p, struct ofconn *ofconn,
2185 struct ofp_header *oh)
2186{
2187 struct ofpbuf *buf;
2188 struct ofp_switch_config *osc;
2189 uint16_t flags;
2190 bool drop_frags;
2191
2192 /* Figure out flags. */
c228a364 2193 dpif_get_drop_frags(p->dpif, &drop_frags);
064af421 2194 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
064af421
BP
2195
2196 /* Send reply. */
2197 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
2198 osc->flags = htons(flags);
2199 osc->miss_send_len = htons(ofconn->miss_send_len);
2200 queue_tx(buf, ofconn, ofconn->reply_counter);
2201
2202 return 0;
2203}
2204
2205static int
2206handle_set_config(struct ofproto *p, struct ofconn *ofconn,
2207 struct ofp_switch_config *osc)
2208{
2209 uint16_t flags;
2210 int error;
2211
2212 error = check_ofp_message(&osc->header, OFPT_SET_CONFIG, sizeof *osc);
2213 if (error) {
2214 return error;
2215 }
2216 flags = ntohs(osc->flags);
2217
9deba63b 2218 if (ofconn->type == OFCONN_CONTROLLER && ofconn->role != NX_ROLE_SLAVE) {
064af421
BP
2219 switch (flags & OFPC_FRAG_MASK) {
2220 case OFPC_FRAG_NORMAL:
c228a364 2221 dpif_set_drop_frags(p->dpif, false);
064af421
BP
2222 break;
2223 case OFPC_FRAG_DROP:
c228a364 2224 dpif_set_drop_frags(p->dpif, true);
064af421
BP
2225 break;
2226 default:
2227 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
2228 osc->flags);
2229 break;
2230 }
2231 }
2232
064af421
BP
2233 ofconn->miss_send_len = ntohs(osc->miss_send_len);
2234
2235 return 0;
2236}
2237
2238static void
6a07af36
JG
2239add_output_group_action(struct odp_actions *actions, uint16_t group,
2240 uint16_t *nf_output_iface)
064af421
BP
2241{
2242 odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
6a07af36
JG
2243
2244 if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
2245 *nf_output_iface = NF_OUT_FLOOD;
2246 }
064af421
BP
2247}
2248
2249static void
2250add_controller_action(struct odp_actions *actions,
2251 const struct ofp_action_output *oao)
2252{
2253 union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER);
30ea5d93 2254 a->controller.arg = ntohs(oao->max_len);
064af421
BP
2255}
2256
2257struct action_xlate_ctx {
2258 /* Input. */
e18fe8a2 2259 flow_t flow; /* Flow to which these actions correspond. */
064af421
BP
2260 int recurse; /* Recursion level, via xlate_table_action. */
2261 struct ofproto *ofproto;
2262 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
2263 * null pointer if we are revalidating
2264 * without a packet to refer to. */
2265
2266 /* Output. */
2267 struct odp_actions *out; /* Datapath actions. */
2268 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
d6fbec6d 2269 bool may_set_up_flow; /* True ordinarily; false if the actions must
064af421 2270 * be reassessed for every packet. */
6a07af36 2271 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
064af421
BP
2272};
2273
2274static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2275 struct action_xlate_ctx *ctx);
2276
2277static void
2278add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2279{
2280 const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
6cfaf517
BP
2281
2282 if (ofport) {
2283 if (ofport->opp.config & OFPPC_NO_FWD) {
2284 /* Forwarding disabled on port. */
2285 return;
2286 }
2287 } else {
2288 /*
2289 * We don't have an ofport record for this port, but it doesn't hurt to
2290 * allow forwarding to it anyhow. Maybe such a port will appear later
2291 * and we're pre-populating the flow table.
2292 */
064af421 2293 }
6cfaf517
BP
2294
2295 odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
6a07af36 2296 ctx->nf_output_iface = port;
064af421
BP
2297}
2298
2299static struct rule *
2300lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
2301{
2302 struct rule *rule;
2303 rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
2304
2305 /* The rule we found might not be valid, since we could be in need of
2306 * revalidation. If it is not valid, don't return it. */
2307 if (rule
2308 && rule->super
2309 && ofproto->need_revalidate
2310 && !revalidate_rule(ofproto, rule)) {
2311 COVERAGE_INC(ofproto_invalidated);
2312 return NULL;
2313 }
2314
2315 return rule;
2316}
2317
2318static void
2319xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2320{
2321 if (!ctx->recurse) {
2c5d1389 2322 uint16_t old_in_port;
064af421 2323 struct rule *rule;
064af421 2324
2c5d1389
BP
2325 /* Look up a flow with 'in_port' as the input port. Then restore the
2326 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2327 * have surprising behavior). */
2328 old_in_port = ctx->flow.in_port;
e18fe8a2
BP
2329 ctx->flow.in_port = in_port;
2330 rule = lookup_valid_rule(ctx->ofproto, &ctx->flow);
2c5d1389
BP
2331 ctx->flow.in_port = old_in_port;
2332
064af421
BP
2333 if (rule) {
2334 if (rule->super) {
2335 rule = rule->super;
2336 }
2337
2338 ctx->recurse++;
2339 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2340 ctx->recurse--;
2341 }
2342 }
2343}
2344
2345static void
2346xlate_output_action(struct action_xlate_ctx *ctx,
2347 const struct ofp_action_output *oao)
2348{
2349 uint16_t odp_port;
6a07af36
JG
2350 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2351
2352 ctx->nf_output_iface = NF_OUT_DROP;
064af421
BP
2353
2354 switch (ntohs(oao->port)) {
2355 case OFPP_IN_PORT:
e18fe8a2 2356 add_output_action(ctx, ctx->flow.in_port);
064af421
BP
2357 break;
2358 case OFPP_TABLE:
e18fe8a2 2359 xlate_table_action(ctx, ctx->flow.in_port);
064af421
BP
2360 break;
2361 case OFPP_NORMAL:
e18fe8a2 2362 if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet,
064af421 2363 ctx->out, ctx->tags,
6a07af36 2364 &ctx->nf_output_iface,
064af421
BP
2365 ctx->ofproto->aux)) {
2366 COVERAGE_INC(ofproto_uninstallable);
d6fbec6d 2367 ctx->may_set_up_flow = false;
064af421
BP
2368 }
2369 break;
2370 case OFPP_FLOOD:
6a07af36
JG
2371 add_output_group_action(ctx->out, DP_GROUP_FLOOD,
2372 &ctx->nf_output_iface);
064af421
BP
2373 break;
2374 case OFPP_ALL:
6a07af36 2375 add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
064af421
BP
2376 break;
2377 case OFPP_CONTROLLER:
2378 add_controller_action(ctx->out, oao);
2379 break;
2380 case OFPP_LOCAL:
2381 add_output_action(ctx, ODPP_LOCAL);
2382 break;
2383 default:
2384 odp_port = ofp_port_to_odp_port(ntohs(oao->port));
e18fe8a2 2385 if (odp_port != ctx->flow.in_port) {
064af421
BP
2386 add_output_action(ctx, odp_port);
2387 }
2388 break;
2389 }
6a07af36
JG
2390
2391 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2392 ctx->nf_output_iface = NF_OUT_FLOOD;
2393 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2394 ctx->nf_output_iface = prev_nf_output_iface;
2395 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2396 ctx->nf_output_iface != NF_OUT_FLOOD) {
2397 ctx->nf_output_iface = NF_OUT_MULTI;
2398 }
064af421
BP
2399}
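/* Worked example of the NetFlow bookkeeping above: for a flow whose actions
 * are "output:1, output:2", the first output records port 1 in
 * nf_output_iface; on the second, the previous value is neither NF_OUT_DROP
 * nor is the new one NF_OUT_FLOOD, so the result collapses to NF_OUT_MULTI.
 * A flood is sticky: once nf_output_iface is NF_OUT_FLOOD, later outputs
 * leave it untouched. */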
2400
2401static void
2402xlate_nicira_action(struct action_xlate_ctx *ctx,
2403 const struct nx_action_header *nah)
2404{
2405 const struct nx_action_resubmit *nar;
659586ef
JG
2406 const struct nx_action_set_tunnel *nast;
2407 union odp_action *oa;
064af421
BP
2408 int subtype = ntohs(nah->subtype);
2409
2410 assert(nah->vendor == htonl(NX_VENDOR_ID));
2411 switch (subtype) {
2412 case NXAST_RESUBMIT:
2413 nar = (const struct nx_action_resubmit *) nah;
2414 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2415 break;
2416
659586ef
JG
2417 case NXAST_SET_TUNNEL:
2418 nast = (const struct nx_action_set_tunnel *) nah;
2419 oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL);
2420 ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
2421 break;
2422
999f0d45
BP
2423 /* If you add a new action here that modifies flow data, don't forget to
 2424 * update the flow key in ctx->flow in the same way. */
2425
064af421
BP
2426 default:
2427 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
2428 break;
2429 }
2430}
2431
2432static void
2433do_xlate_actions(const union ofp_action *in, size_t n_in,
2434 struct action_xlate_ctx *ctx)
2435{
2436 struct actions_iterator iter;
2437 const union ofp_action *ia;
2438 const struct ofport *port;
2439
e18fe8a2 2440 port = port_array_get(&ctx->ofproto->ports, ctx->flow.in_port);
064af421 2441 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
e18fe8a2 2442 port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, stp_eth_addr)
064af421
BP
2443 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
2444 /* Drop this flow. */
2445 return;
2446 }
2447
2448 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
2449 uint16_t type = ntohs(ia->type);
2450 union odp_action *oa;
2451
2452 switch (type) {
2453 case OFPAT_OUTPUT:
2454 xlate_output_action(ctx, &ia->output);
2455 break;
2456
2457 case OFPAT_SET_VLAN_VID:
2458 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID);
999f0d45 2459 ctx->flow.dl_vlan = oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid;
064af421
BP
2460 break;
2461
2462 case OFPAT_SET_VLAN_PCP:
2463 oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP);
999f0d45 2464 ctx->flow.dl_vlan_pcp = oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp;
064af421
BP
2465 break;
2466
2467 case OFPAT_STRIP_VLAN:
2468 odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
999f0d45
BP
2469 ctx->flow.dl_vlan = OFP_VLAN_NONE;
2470 ctx->flow.dl_vlan_pcp = 0;
064af421
BP
2471 break;
2472
2473 case OFPAT_SET_DL_SRC:
2474 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC);
2475 memcpy(oa->dl_addr.dl_addr,
2476 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
999f0d45
BP
2477 memcpy(ctx->flow.dl_src,
2478 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
064af421
BP
2479 break;
2480
2481 case OFPAT_SET_DL_DST:
2482 oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST);
2483 memcpy(oa->dl_addr.dl_addr,
2484 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
999f0d45
BP
2485 memcpy(ctx->flow.dl_dst,
2486 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
064af421
BP
2487 break;
2488
2489 case OFPAT_SET_NW_SRC:
2490 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC);
999f0d45 2491 ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
064af421
BP
2492 break;
2493
2d70a31a
JP
2494 case OFPAT_SET_NW_DST:
2495 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
999f0d45 2496 ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
2d38e234 2497 break;
959a2ecd
JP
2498
2499 case OFPAT_SET_NW_TOS:
2500 oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS);
999f0d45 2501 ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
2d70a31a
JP
2502 break;
2503
064af421
BP
2504 case OFPAT_SET_TP_SRC:
2505 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
999f0d45 2506 ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
064af421
BP
2507 break;
2508
2d70a31a
JP
2509 case OFPAT_SET_TP_DST:
2510 oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
999f0d45 2511 ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
2d70a31a
JP
2512 break;
2513
064af421
BP
2514 case OFPAT_VENDOR:
2515 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
2516 break;
2517
2518 default:
2519 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
2520 break;
2521 }
2522 }
2523}
2524
2525static int
2526xlate_actions(const union ofp_action *in, size_t n_in,
2527 const flow_t *flow, struct ofproto *ofproto,
2528 const struct ofpbuf *packet,
6a07af36
JG
2529 struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
2530 uint16_t *nf_output_iface)
064af421
BP
2531{
2532 tag_type no_tags = 0;
2533 struct action_xlate_ctx ctx;
2534 COVERAGE_INC(ofproto_ofp2odp);
2535 odp_actions_init(out);
e18fe8a2 2536 ctx.flow = *flow;
064af421
BP
2537 ctx.recurse = 0;
2538 ctx.ofproto = ofproto;
2539 ctx.packet = packet;
2540 ctx.out = out;
2541 ctx.tags = tags ? tags : &no_tags;
d6fbec6d 2542 ctx.may_set_up_flow = true;
6a07af36 2543 ctx.nf_output_iface = NF_OUT_DROP;
064af421 2544 do_xlate_actions(in, n_in, &ctx);
0ad9b732 2545
d6fbec6d 2546 /* Check with in-band control to see if we're allowed to set up this
0ad9b732
JP
2547 * flow. */
2548 if (!in_band_rule_check(ofproto->in_band, flow, out)) {
d6fbec6d 2549 ctx.may_set_up_flow = false;
0ad9b732
JP
2550 }
2551
d6fbec6d
BP
2552 if (may_set_up_flow) {
2553 *may_set_up_flow = ctx.may_set_up_flow;
064af421 2554 }
6a07af36
JG
2555 if (nf_output_iface) {
2556 *nf_output_iface = ctx.nf_output_iface;
064af421
BP
2557 }
2558 if (odp_actions_overflow(out)) {
2559 odp_actions_init(out);
2560 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
2561 }
2562 return 0;
2563}
2564
9deba63b
BP
2565/* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow
2566 * error message code (composed with ofp_mkerr()) for the caller to propagate
2567 * upward. Otherwise, returns 0.
2568 *
2569 * 'oh' is used to make log messages more informative. */
2570static int
2571reject_slave_controller(struct ofconn *ofconn, const struct ofp_header *oh)
2572{
2573 if (ofconn->type == OFCONN_CONTROLLER && ofconn->role == NX_ROLE_SLAVE) {
2574 static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
2575 char *type_name;
2576
2577 type_name = ofp_message_type_to_string(oh->type);
2578 VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
2579 type_name);
2580 free(type_name);
2581
2582 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
2583 } else {
2584 return 0;
2585 }
2586}
2587
064af421
BP
2588static int
2589handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
2590 struct ofp_header *oh)
2591{
2592 struct ofp_packet_out *opo;
2593 struct ofpbuf payload, *buffer;
2594 struct odp_actions actions;
2595 int n_actions;
2596 uint16_t in_port;
2597 flow_t flow;
2598 int error;
2599
9deba63b
BP
2600 error = reject_slave_controller(ofconn, oh);
2601 if (error) {
2602 return error;
2603 }
2604
064af421
BP
2605 error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports);
2606 if (error) {
2607 return error;
2608 }
2609 opo = (struct ofp_packet_out *) oh;
2610
2611 COVERAGE_INC(ofproto_packet_out);
2612 if (opo->buffer_id != htonl(UINT32_MAX)) {
2613 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
2614 &buffer, &in_port);
7778bd15 2615 if (error || !buffer) {
064af421
BP
2616 return error;
2617 }
2618 payload = *buffer;
2619 } else {
2620 buffer = NULL;
2621 }
2622
659586ef 2623 flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
064af421 2624 error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
6a07af36 2625 &flow, p, &payload, &actions, NULL, NULL, NULL);
064af421
BP
2626 if (error) {
2627 return error;
2628 }
2629
c228a364 2630 dpif_execute(p->dpif, flow.in_port, actions.actions, actions.n_actions,
064af421
BP
2631 &payload);
2632 ofpbuf_delete(buffer);
2633
2634 return 0;
2635}
2636
2637static void
2638update_port_config(struct ofproto *p, struct ofport *port,
2639 uint32_t config, uint32_t mask)
2640{
2641 mask &= config ^ port->opp.config;
2642 if (mask & OFPPC_PORT_DOWN) {
2643 if (config & OFPPC_PORT_DOWN) {
2644 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
2645 } else {
2646 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
2647 }
2648 }
2649#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
2650 if (mask & REVALIDATE_BITS) {
2651 COVERAGE_INC(ofproto_costly_flags);
2652 port->opp.config ^= mask & REVALIDATE_BITS;
2653 p->need_revalidate = true;
2654 }
2655#undef REVALIDATE_BITS
2656 if (mask & OFPPC_NO_FLOOD) {
2657 port->opp.config ^= OFPPC_NO_FLOOD;
72b06300 2658 refresh_port_groups(p);
064af421
BP
2659 }
2660 if (mask & OFPPC_NO_PACKET_IN) {
2661 port->opp.config ^= OFPPC_NO_PACKET_IN;
2662 }
2663}
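/* Worked example for the masking above (hypothetical values): if the port
 * currently has only OFPPC_NO_FLOOD set and the controller sends
 * config = OFPPC_PORT_DOWN with mask = OFPPC_PORT_DOWN | OFPPC_NO_FLOOD,
 * then config ^ opp.config leaves both bits set, so the port is brought
 * down and OFPPC_NO_FLOOD is toggled off (triggering a port-group refresh).
 * Masked-in bits that already hold the requested value are discarded, so no
 * spurious revalidation occurs. */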
2664
2665static int
9deba63b
BP
2666handle_port_mod(struct ofproto *p, struct ofconn *ofconn,
2667 struct ofp_header *oh)
064af421
BP
2668{
2669 const struct ofp_port_mod *opm;
2670 struct ofport *port;
2671 int error;
2672
9deba63b
BP
2673 error = reject_slave_controller(ofconn, oh);
2674 if (error) {
2675 return error;
2676 }
064af421
BP
2677 error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
2678 if (error) {
2679 return error;
2680 }
2681 opm = (struct ofp_port_mod *) oh;
2682
2683 port = port_array_get(&p->ports,
2684 ofp_port_to_odp_port(ntohs(opm->port_no)));
2685 if (!port) {
2686 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
2687 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
2688 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
2689 } else {
2690 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
2691 if (opm->advertise) {
2692 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
2693 }
2694 }
2695 return 0;
2696}
2697
2698static struct ofpbuf *
2699make_stats_reply(uint32_t xid, uint16_t type, size_t body_len)
2700{
2701 struct ofp_stats_reply *osr;
2702 struct ofpbuf *msg;
2703
2704 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
2705 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
2706 osr->type = type;
2707 osr->flags = htons(0);
2708 return msg;
2709}
2710
2711static struct ofpbuf *
2712start_stats_reply(const struct ofp_stats_request *request, size_t body_len)
2713{
2714 return make_stats_reply(request->header.xid, request->type, body_len);
2715}
2716
2717static void *
2718append_stats_reply(size_t nbytes, struct ofconn *ofconn, struct ofpbuf **msgp)
2719{
2720 struct ofpbuf *msg = *msgp;
2721 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
2722 if (nbytes + msg->size > UINT16_MAX) {
2723 struct ofp_stats_reply *reply = msg->data;
2724 reply->flags = htons(OFPSF_REPLY_MORE);
2725 *msgp = make_stats_reply(reply->header.xid, reply->type, nbytes);
2726 queue_tx(msg, ofconn, ofconn->reply_counter);
2727 }
2728 return ofpbuf_put_uninit(*msgp, nbytes);
2729}
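/* In other words, append_stats_reply() transparently splits replies that
 * would overflow the 16-bit OpenFlow length field: when adding 'nbytes'
 * would push the current buffer past UINT16_MAX, the buffer is flagged
 * OFPSF_REPLY_MORE and queued, a fresh reply with the same xid and type is
 * started, and the new bytes always land in the buffer returned through
 * '*msgp'. */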
2730
2731static int
2732handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn,
2733 struct ofp_stats_request *request)
2734{
2735 struct ofp_desc_stats *ods;
2736 struct ofpbuf *msg;
2737
2738 msg = start_stats_reply(request, sizeof *ods);
2739 ods = append_stats_reply(sizeof *ods, ofconn, &msg);
5a719c38
JP
2740 memset(ods, 0, sizeof *ods);
2741 ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc);
2742 ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc);
2743 ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
2744 ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
2745 ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
064af421
BP
2746 queue_tx(msg, ofconn, ofconn->reply_counter);
2747
2748 return 0;
2749}
2750
2751static void
2752count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
2753{
2754 struct rule *rule = rule_from_cls_rule(cls_rule);
2755 int *n_subrules = n_subrules_;
2756
2757 if (rule->super) {
2758 (*n_subrules)++;
2759 }
2760}
2761
2762static int
2763handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
2764 struct ofp_stats_request *request)
2765{
2766 struct ofp_table_stats *ots;
2767 struct ofpbuf *msg;
2768 struct odp_stats dpstats;
2769 int n_exact, n_subrules, n_wild;
2770
2771 msg = start_stats_reply(request, sizeof *ots * 2);
2772
2773 /* Count rules of various kinds. */
2774 n_subrules = 0;
2775 classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
2776 n_exact = classifier_count_exact(&p->cls) - n_subrules;
2777 n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
2778
2779 /* Hash table. */
c228a364 2780 dpif_get_dp_stats(p->dpif, &dpstats);
064af421
BP
2781 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2782 memset(ots, 0, sizeof *ots);
2783 ots->table_id = TABLEID_HASH;
2784 strcpy(ots->name, "hash");
2785 ots->wildcards = htonl(0);
2786 ots->max_entries = htonl(dpstats.max_capacity);
2787 ots->active_count = htonl(n_exact);
2788 ots->lookup_count = htonll(dpstats.n_frags + dpstats.n_hit +
2789 dpstats.n_missed);
2790 ots->matched_count = htonll(dpstats.n_hit); /* XXX */
2791
2792 /* Classifier table. */
2793 ots = append_stats_reply(sizeof *ots, ofconn, &msg);
2794 memset(ots, 0, sizeof *ots);
2795 ots->table_id = TABLEID_CLASSIFIER;
2796 strcpy(ots->name, "classifier");
659586ef
JG
2797 ots->wildcards = p->tun_id_from_cookie ? htonl(OVSFW_ALL)
2798 : htonl(OFPFW_ALL);
064af421
BP
2799 ots->max_entries = htonl(65536);
2800 ots->active_count = htonl(n_wild);
2801 ots->lookup_count = htonll(0); /* XXX */
2802 ots->matched_count = htonll(0); /* XXX */
2803
2804 queue_tx(msg, ofconn, ofconn->reply_counter);
2805 return 0;
2806}
2807
abaad8cf
JP
2808static void
2809append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
a4948b95 2810 struct ofpbuf **msgp)
abaad8cf
JP
2811{
2812 struct netdev_stats stats;
2813 struct ofp_port_stats *ops;
2814
2815 /* Intentionally ignore return value, since errors will set
2816 * 'stats' to all-1s, which is correct for OpenFlow, and
2817 * netdev_get_stats() will log errors. */
2818 netdev_get_stats(port->netdev, &stats);
2819
a4948b95 2820 ops = append_stats_reply(sizeof *ops, ofconn, msgp);
abaad8cf
JP
2821 ops->port_no = htons(odp_port_to_ofp_port(port_no));
2822 memset(ops->pad, 0, sizeof ops->pad);
2823 ops->rx_packets = htonll(stats.rx_packets);
2824 ops->tx_packets = htonll(stats.tx_packets);
2825 ops->rx_bytes = htonll(stats.rx_bytes);
2826 ops->tx_bytes = htonll(stats.tx_bytes);
2827 ops->rx_dropped = htonll(stats.rx_dropped);
2828 ops->tx_dropped = htonll(stats.tx_dropped);
2829 ops->rx_errors = htonll(stats.rx_errors);
2830 ops->tx_errors = htonll(stats.tx_errors);
2831 ops->rx_frame_err = htonll(stats.rx_frame_errors);
2832 ops->rx_over_err = htonll(stats.rx_over_errors);
2833 ops->rx_crc_err = htonll(stats.rx_crc_errors);
2834 ops->collisions = htonll(stats.collisions);
2835}
2836
064af421
BP
2837static int
2838handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn,
abaad8cf
JP
2839 struct ofp_stats_request *osr,
2840 size_t arg_size)
064af421 2841{
abaad8cf 2842 struct ofp_port_stats_request *psr;
064af421
BP
2843 struct ofp_port_stats *ops;
2844 struct ofpbuf *msg;
2845 struct ofport *port;
2846 unsigned int port_no;
2847
abaad8cf
JP
2848 if (arg_size != sizeof *psr) {
2849 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
2850 }
2851 psr = (struct ofp_port_stats_request *) osr->body;
2852
2853 msg = start_stats_reply(osr, sizeof *ops * 16);
2854 if (psr->port_no != htons(OFPP_NONE)) {
2855 port = port_array_get(&p->ports,
2856 ofp_port_to_odp_port(ntohs(psr->port_no)));
2857 if (port) {
a4948b95 2858 append_port_stat(port, ntohs(psr->port_no), ofconn, &msg);
abaad8cf
JP
2859 }
2860 } else {
2861 PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
a4948b95 2862 append_port_stat(port, port_no, ofconn, &msg);
abaad8cf 2863 }
064af421
BP
2864 }
2865
2866 queue_tx(msg, ofconn, ofconn->reply_counter);
2867 return 0;
2868}
2869
2870struct flow_stats_cbdata {
2871 struct ofproto *ofproto;
2872 struct ofconn *ofconn;
2873 uint16_t out_port;
2874 struct ofpbuf *msg;
2875};
2876
01149cfd
BP
2877/* Obtains statistic counters for 'rule' within 'p' and stores them into
2878 * '*packet_countp' and '*byte_countp'. If 'rule' is a wildcarded rule, the
 2879 * returned statistics include those for all of 'rule''s subrules. */
064af421
BP
2880static void
2881query_stats(struct ofproto *p, struct rule *rule,
2882 uint64_t *packet_countp, uint64_t *byte_countp)
2883{
2884 uint64_t packet_count, byte_count;
2885 struct rule *subrule;
2886 struct odp_flow *odp_flows;
2887 size_t n_odp_flows;
2888
01149cfd
BP
2889 /* Start from historical data for 'rule' itself that are no longer tracked
2890 * by the datapath. This counts, for example, subrules that have
2891 * expired. */
b3137fe8
JG
2892 packet_count = rule->packet_count;
2893 byte_count = rule->byte_count;
2894
01149cfd
BP
2895 /* Prepare to ask the datapath for statistics on 'rule', or if it is
2896 * wildcarded then on all of its subrules.
2897 *
2898 * Also, add any statistics that are not tracked by the datapath for each
2899 * subrule. This includes, for example, statistics for packets that were
2900 * executed "by hand" by ofproto via dpif_execute() but must be accounted
2901 * to a flow. */
064af421 2902 n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
ec6fde61 2903 odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows);
064af421
BP
2904 if (rule->cr.wc.wildcards) {
2905 size_t i = 0;
2906 LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
2907 odp_flows[i++].key = subrule->cr.flow;
b3137fe8
JG
2908 packet_count += subrule->packet_count;
2909 byte_count += subrule->byte_count;
064af421
BP
2910 }
2911 } else {
2912 odp_flows[0].key = rule->cr.flow;
2913 }
2914
28998b22 2915 /* Fetch up-to-date statistics from the datapath and add them in. */
c228a364 2916 if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) {
064af421
BP
2917 size_t i;
2918 for (i = 0; i < n_odp_flows; i++) {
2919 struct odp_flow *odp_flow = &odp_flows[i];
2920 packet_count += odp_flow->stats.n_packets;
2921 byte_count += odp_flow->stats.n_bytes;
2922 }
2923 }
2924 free(odp_flows);
2925
01149cfd 2926 /* Return the stats to the caller. */
064af421
BP
2927 *packet_countp = packet_count;
2928 *byte_countp = byte_count;
2929}
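/* Conceptually, for a wildcarded rule the totals computed above are
 *
 *     packets = rule->packet_count
 *               + sum over subrules of (subrule->packet_count
 *                                       + datapath packets for the subrule)
 *
 * and likewise for bytes; an exact-match rule sums only its own software
 * counters plus its single datapath entry. */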
2930
2931static void
2932flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
2933{
2934 struct rule *rule = rule_from_cls_rule(rule_);
2935 struct flow_stats_cbdata *cbdata = cbdata_;
2936 struct ofp_flow_stats *ofs;
2937 uint64_t packet_count, byte_count;
2938 size_t act_len, len;
26c3f94a
JP
2939 long long int tdiff = time_msec() - rule->created;
2940 uint32_t sec = tdiff / 1000;
2941 uint32_t msec = tdiff - (sec * 1000);
064af421
BP
2942
2943 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
2944 return;
2945 }
2946
2947 act_len = sizeof *rule->actions * rule->n_actions;
2948 len = offsetof(struct ofp_flow_stats, actions) + act_len;
2949
2950 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
2951
2952 ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
2953 ofs->length = htons(len);
2954 ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
2955 ofs->pad = 0;
659586ef
JG
2956 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
2957 cbdata->ofproto->tun_id_from_cookie, &ofs->match);
26c3f94a
JP
2958 ofs->duration_sec = htonl(sec);
2959 ofs->duration_nsec = htonl(msec * 1000000);
39997502 2960 ofs->cookie = rule->flow_cookie;
064af421
BP
2961 ofs->priority = htons(rule->cr.priority);
2962 ofs->idle_timeout = htons(rule->idle_timeout);
2963 ofs->hard_timeout = htons(rule->hard_timeout);
39997502 2964 memset(ofs->pad2, 0, sizeof ofs->pad2);
064af421
BP
2965 ofs->packet_count = htonll(packet_count);
2966 ofs->byte_count = htonll(byte_count);
2967 memcpy(ofs->actions, rule->actions, act_len);
2968}
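/* Duration arithmetic above, with hypothetical numbers: a flow created
 * 2500 ms ago yields sec = 2 and msec = 500, so the reply reports
 * duration_sec = 2 and duration_nsec = 500000000, i.e. 2.5 seconds with
 * millisecond granularity expressed in nanoseconds. */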
2969
2970static int
2971table_id_to_include(uint8_t table_id)
2972{
2973 return (table_id == TABLEID_HASH ? CLS_INC_EXACT
2974 : table_id == TABLEID_CLASSIFIER ? CLS_INC_WILD
2975 : table_id == 0xff ? CLS_INC_ALL
2976 : 0);
2977}
2978
2979static int
2980handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn,
2981 const struct ofp_stats_request *osr,
2982 size_t arg_size)
2983{
2984 struct ofp_flow_stats_request *fsr;
2985 struct flow_stats_cbdata cbdata;
2986 struct cls_rule target;
2987
2988 if (arg_size != sizeof *fsr) {
49bdc010 2989 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
2990 }
2991 fsr = (struct ofp_flow_stats_request *) osr->body;
2992
2993 COVERAGE_INC(ofproto_flows_req);
2994 cbdata.ofproto = p;
2995 cbdata.ofconn = ofconn;
2996 cbdata.out_port = fsr->out_port;
2997 cbdata.msg = start_stats_reply(osr, 1024);
659586ef 2998 cls_rule_from_match(&fsr->match, 0, false, 0, &target);
064af421
BP
2999 classifier_for_each_match(&p->cls, &target,
3000 table_id_to_include(fsr->table_id),
3001 flow_stats_cb, &cbdata);
3002 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3003 return 0;
3004}
3005
4f2cad2c
JP
3006struct flow_stats_ds_cbdata {
3007 struct ofproto *ofproto;
3008 struct ds *results;
3009};
3010
3011static void
3012flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
3013{
3014 struct rule *rule = rule_from_cls_rule(rule_);
3015 struct flow_stats_ds_cbdata *cbdata = cbdata_;
3016 struct ds *results = cbdata->results;
3017 struct ofp_match match;
3018 uint64_t packet_count, byte_count;
3019 size_t act_len = sizeof *rule->actions * rule->n_actions;
3020
3021 /* Don't report on subrules. */
3022 if (rule->super != NULL) {
3023 return;
3024 }
3025
3026 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
659586ef
JG
3027 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
3028 cbdata->ofproto->tun_id_from_cookie, &match);
4f2cad2c
JP
3029
3030 ds_put_format(results, "duration=%llds, ",
3031 (time_msec() - rule->created) / 1000);
52ae00b3 3032 ds_put_format(results, "priority=%u, ", rule->cr.priority);
4f2cad2c
JP
3033 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
3034 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
3035 ofp_print_match(results, &match, true);
3036 ofp_print_actions(results, &rule->actions->header, act_len);
3037 ds_put_cstr(results, "\n");
3038}
3039
3040/* Adds a pretty-printed description of all flows to 'results', including
3041 * those marked hidden by secchan (e.g., by in-band control). */
3042void
3043ofproto_get_all_flows(struct ofproto *p, struct ds *results)
3044{
3045 struct ofp_match match;
3046 struct cls_rule target;
3047 struct flow_stats_ds_cbdata cbdata;
3048
3049 memset(&match, 0, sizeof match);
659586ef 3050 match.wildcards = htonl(OVSFW_ALL);
4f2cad2c
JP
3051
3052 cbdata.ofproto = p;
3053 cbdata.results = results;
3054
659586ef 3055 cls_rule_from_match(&match, 0, false, 0, &target);
4f2cad2c
JP
3056 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3057 flow_stats_ds_cb, &cbdata);
3058}
3059
064af421
BP
3060struct aggregate_stats_cbdata {
3061 struct ofproto *ofproto;
3062 uint16_t out_port;
3063 uint64_t packet_count;
3064 uint64_t byte_count;
3065 uint32_t n_flows;
3066};
3067
3068static void
3069aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
3070{
3071 struct rule *rule = rule_from_cls_rule(rule_);
3072 struct aggregate_stats_cbdata *cbdata = cbdata_;
3073 uint64_t packet_count, byte_count;
3074
3075 if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
3076 return;
3077 }
3078
3079 query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
3080
3081 cbdata->packet_count += packet_count;
3082 cbdata->byte_count += byte_count;
3083 cbdata->n_flows++;
3084}
3085
3086static int
3087handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn,
3088 const struct ofp_stats_request *osr,
3089 size_t arg_size)
3090{
3091 struct ofp_aggregate_stats_request *asr;
3092 struct ofp_aggregate_stats_reply *reply;
3093 struct aggregate_stats_cbdata cbdata;
3094 struct cls_rule target;
3095 struct ofpbuf *msg;
3096
3097 if (arg_size != sizeof *asr) {
49bdc010 3098 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
3099 }
3100 asr = (struct ofp_aggregate_stats_request *) osr->body;
3101
3102 COVERAGE_INC(ofproto_agg_request);
3103 cbdata.ofproto = p;
3104 cbdata.out_port = asr->out_port;
3105 cbdata.packet_count = 0;
3106 cbdata.byte_count = 0;
3107 cbdata.n_flows = 0;
659586ef 3108 cls_rule_from_match(&asr->match, 0, false, 0, &target);
064af421
BP
3109 classifier_for_each_match(&p->cls, &target,
3110 table_id_to_include(asr->table_id),
3111 aggregate_stats_cb, &cbdata);
3112
3113 msg = start_stats_reply(osr, sizeof *reply);
3114 reply = append_stats_reply(sizeof *reply, ofconn, &msg);
3115 reply->flow_count = htonl(cbdata.n_flows);
3116 reply->packet_count = htonll(cbdata.packet_count);
3117 reply->byte_count = htonll(cbdata.byte_count);
3118 queue_tx(msg, ofconn, ofconn->reply_counter);
3119 return 0;
3120}
3121
3122static int
3123handle_stats_request(struct ofproto *p, struct ofconn *ofconn,
3124 struct ofp_header *oh)
3125{
3126 struct ofp_stats_request *osr;
3127 size_t arg_size;
3128 int error;
3129
3130 error = check_ofp_message_array(oh, OFPT_STATS_REQUEST, sizeof *osr,
3131 1, &arg_size);
3132 if (error) {
3133 return error;
3134 }
3135 osr = (struct ofp_stats_request *) oh;
3136
3137 switch (ntohs(osr->type)) {
3138 case OFPST_DESC:
3139 return handle_desc_stats_request(p, ofconn, osr);
3140
3141 case OFPST_FLOW:
3142 return handle_flow_stats_request(p, ofconn, osr, arg_size);
3143
3144 case OFPST_AGGREGATE:
3145 return handle_aggregate_stats_request(p, ofconn, osr, arg_size);
3146
3147 case OFPST_TABLE:
3148 return handle_table_stats_request(p, ofconn, osr);
3149
3150 case OFPST_PORT:
abaad8cf 3151 return handle_port_stats_request(p, ofconn, osr, arg_size);
064af421
BP
3152
3153 case OFPST_VENDOR:
3154 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3155
3156 default:
3157 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
3158 }
3159}
3160
3161static long long int
3162msec_from_nsec(uint64_t sec, uint32_t nsec)
3163{
3164 return !sec ? 0 : sec * 1000 + nsec / 1000000;
3165}
3166
3167static void
0193b2af
JG
3168update_time(struct ofproto *ofproto, struct rule *rule,
3169 const struct odp_flow_stats *stats)
064af421
BP
3170{
3171 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
3172 if (used > rule->used) {
3173 rule->used = used;
4836f9f2
JP
3174 if (rule->super && used > rule->super->used) {
3175 rule->super->used = used;
3176 }
0193b2af 3177 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
064af421
BP
3178 }
3179}
3180
3181static void
0193b2af
JG
3182update_stats(struct ofproto *ofproto, struct rule *rule,
3183 const struct odp_flow_stats *stats)
064af421 3184{
064af421 3185 if (stats->n_packets) {
0193b2af
JG
3186 update_time(ofproto, rule, stats);
3187 rule->packet_count += stats->n_packets;
3188 rule->byte_count += stats->n_bytes;
3189 netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
3190 stats->tcp_flags);
064af421
BP
3191 }
3192}
3193
79eee1eb
BP
3194/* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
3195 * in which no matching flow already exists in the flow table.
3196 *
3197 * Adds the flow specified by 'ofm', which is followed by 'n_actions'
3198 * ofp_actions, to 'p''s flow table. Returns 0 on success or an OpenFlow error
3199 * code as encoded by ofp_mkerr() on failure.
3200 *
3201 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3202 * if any. */
064af421
BP
3203static int
3204add_flow(struct ofproto *p, struct ofconn *ofconn,
79eee1eb 3205 const struct ofp_flow_mod *ofm, size_t n_actions)
064af421
BP
3206{
3207 struct ofpbuf *packet;
3208 struct rule *rule;
3209 uint16_t in_port;
3210 int error;
3211
49bdc010
JP
3212 if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
3213 flow_t flow;
3214 uint32_t wildcards;
3215
659586ef
JG
3216 flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
3217 &flow, &wildcards);
49bdc010
JP
3218 if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
3219 ntohs(ofm->priority))) {
3220 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
3221 }
3222 }
3223
0193b2af 3224 rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
064af421 3225 n_actions, ntohs(ofm->idle_timeout),
39997502 3226 ntohs(ofm->hard_timeout), ofm->cookie,
ca069229 3227 ofm->flags & htons(OFPFF_SEND_FLOW_REM));
659586ef
JG
3228 cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
3229 p->tun_id_from_cookie, ofm->cookie, &rule->cr);
064af421 3230
064af421
BP
3231 error = 0;
3232 if (ofm->buffer_id != htonl(UINT32_MAX)) {
3233 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
3234 &packet, &in_port);
212fe71c
BP
3235 } else {
3236 packet = NULL;
165cd8a3 3237 in_port = UINT16_MAX;
064af421
BP
3238 }
3239
3240 rule_insert(p, rule, packet, in_port);
3241 ofpbuf_delete(packet);
3242 return error;
3243}
3244
79eee1eb
BP
3245static struct rule *
3246find_flow_strict(struct ofproto *p, const struct ofp_flow_mod *ofm)
064af421 3247{
064af421
BP
3248 uint32_t wildcards;
3249 flow_t flow;
3250
659586ef
JG
3251 flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie,
3252 &flow, &wildcards);
79eee1eb 3253 return rule_from_cls_rule(classifier_find_rule_exactly(
064af421
BP
3254 &p->cls, &flow, wildcards,
3255 ntohs(ofm->priority)));
79eee1eb 3256}
064af421 3257
79eee1eb
BP
3258static int
3259send_buffered_packet(struct ofproto *ofproto, struct ofconn *ofconn,
3260 struct rule *rule, const struct ofp_flow_mod *ofm)
3261{
3262 struct ofpbuf *packet;
3263 uint16_t in_port;
3264 flow_t flow;
3265 int error;
064af421 3266
79eee1eb
BP
3267 if (ofm->buffer_id == htonl(UINT32_MAX)) {
3268 return 0;
064af421 3269 }
79eee1eb
BP
3270
3271 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
3272 &packet, &in_port);
3273 if (error) {
3274 return error;
3275 }
3276
659586ef 3277 flow_extract(packet, 0, in_port, &flow);
79eee1eb
BP
3278 rule_execute(ofproto, rule, packet, &flow);
3279 ofpbuf_delete(packet);
3280
064af421
BP
3281 return 0;
3282}
79eee1eb
BP
3283\f
3284/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
064af421
BP
3285
3286struct modify_flows_cbdata {
3287 struct ofproto *ofproto;
3288 const struct ofp_flow_mod *ofm;
064af421 3289 size_t n_actions;
79eee1eb 3290 struct rule *match;
064af421
BP
3291};
3292
79eee1eb
BP
3293static int modify_flow(struct ofproto *, const struct ofp_flow_mod *,
3294 size_t n_actions, struct rule *);
3295static void modify_flows_cb(struct cls_rule *, void *cbdata_);
3296
3297/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
3298 * encoded by ofp_mkerr() on failure.
3299 *
3300 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3301 * if any. */
3302static int
3303modify_flows_loose(struct ofproto *p, struct ofconn *ofconn,
3304 const struct ofp_flow_mod *ofm, size_t n_actions)
3305{
3306 struct modify_flows_cbdata cbdata;
3307 struct cls_rule target;
3308
3309 cbdata.ofproto = p;
3310 cbdata.ofm = ofm;
3311 cbdata.n_actions = n_actions;
3312 cbdata.match = NULL;
3313
659586ef
JG
3314 cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
3315 &target);
79eee1eb
BP
3316
3317 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
3318 modify_flows_cb, &cbdata);
3319 if (cbdata.match) {
 3320 /* This credits the packet to whichever flow happened to
3321 * match last. That's weird. Maybe we should do a lookup for the
3322 * flow that actually matches the packet? Who knows. */
3323 send_buffered_packet(p, ofconn, cbdata.match, ofm);
3324 return 0;
3325 } else {
3326 return add_flow(p, ofconn, ofm, n_actions);
3327 }
3328}
3329
3330/* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
3331 * code as encoded by ofp_mkerr() on failure.
3332 *
3333 * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
3334 * if any. */
3335static int
3336modify_flow_strict(struct ofproto *p, struct ofconn *ofconn,
3337 struct ofp_flow_mod *ofm, size_t n_actions)
3338{
3339 struct rule *rule = find_flow_strict(p, ofm);
3340 if (rule && !rule_is_hidden(rule)) {
3341 modify_flow(p, ofm, n_actions, rule);
3342 return send_buffered_packet(p, ofconn, rule, ofm);
3343 } else {
3344 return add_flow(p, ofconn, ofm, n_actions);
3345 }
3346}
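/* To summarize the two variants: the loose form walks every classifier rule
 * that falls within 'ofm->match' (priority is not part of the loose match)
 * and rewrites each one's actions, while the strict form touches only the
 * single rule whose match and priority are identical to the request.  Both
 * fall back to add_flow() when nothing matches. */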
3347
3348/* Callback for modify_flows_loose(). */
064af421
BP
3349static void
3350modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
3351{
3352 struct rule *rule = rule_from_cls_rule(rule_);
3353 struct modify_flows_cbdata *cbdata = cbdata_;
3354
79eee1eb
BP
3355 if (!rule_is_hidden(rule)) {
3356 cbdata->match = rule;
3357 modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions, rule);
064af421 3358 }
064af421
BP
3359}
3360
79eee1eb
BP
3361/* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
3362 * been identified as a flow in 'p''s flow table to be modified, by changing
3363 * the rule's actions to match those in 'ofm' (which is followed by 'n_actions'
3364 * ofp_action[] structures). */
064af421 3365static int
79eee1eb
BP
3366modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
3367 size_t n_actions, struct rule *rule)
064af421 3368{
79eee1eb
BP
3369 size_t actions_len = n_actions * sizeof *rule->actions;
3370
3371 rule->flow_cookie = ofm->cookie;
3372
3373 /* If the actions are the same, do nothing. */
3374 if (n_actions == rule->n_actions
3375 && !memcmp(ofm->actions, rule->actions, actions_len))
3376 {
3377 return 0;
3378 }
3379
3380 /* Replace actions. */
3381 free(rule->actions);
3382 rule->actions = xmemdup(ofm->actions, actions_len);
3383 rule->n_actions = n_actions;
3384
3385 /* Make sure that the datapath gets updated properly. */
3386 if (rule->cr.wc.wildcards) {
3387 COVERAGE_INC(ofproto_mod_wc_flow);
3388 p->need_revalidate = true;
3389 } else {
3390 rule_update_actions(p, rule);
3391 }
3392
3393 return 0;
3394}
3395\f
3396/* OFPFC_DELETE implementation. */
3397
3398struct delete_flows_cbdata {
3399 struct ofproto *ofproto;
3400 uint16_t out_port;
3401};
3402
3403static void delete_flows_cb(struct cls_rule *, void *cbdata_);
3404static void delete_flow(struct ofproto *, struct rule *, uint16_t out_port);
3405
3406/* Implements OFPFC_DELETE. */
3407static void
3408delete_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm)
3409{
3410 struct delete_flows_cbdata cbdata;
064af421
BP
3411 struct cls_rule target;
3412
3413 cbdata.ofproto = p;
79eee1eb 3414 cbdata.out_port = ofm->out_port;
064af421 3415
659586ef
JG
3416 cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie,
3417 &target);
064af421
BP
3418
3419 classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
79eee1eb 3420 delete_flows_cb, &cbdata);
064af421
BP
3421}
3422
79eee1eb
BP
3423/* Implements OFPFC_DELETE_STRICT. */
3424static void
3425delete_flow_strict(struct ofproto *p, struct ofp_flow_mod *ofm)
3426{
3427 struct rule *rule = find_flow_strict(p, ofm);
3428 if (rule) {
3429 delete_flow(p, rule, ofm->out_port);
3430 }
3431}
3432
3433/* Callback for delete_flows_loose(). */
3434static void
3435delete_flows_cb(struct cls_rule *rule_, void *cbdata_)
3436{
3437 struct rule *rule = rule_from_cls_rule(rule_);
3438 struct delete_flows_cbdata *cbdata = cbdata_;
3439
3440 delete_flow(cbdata->ofproto, rule, cbdata->out_port);
3441}
3442
3443/* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
3444 * been identified as a flow to delete from 'p''s flow table, by deleting the
 3445 * flow and sending out an OFPT_FLOW_REMOVED message to any interested
3446 * controller.
3447 *
3448 * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
3449 * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
3450 * specified 'out_port'. */
3451static void
3452delete_flow(struct ofproto *p, struct rule *rule, uint16_t out_port)
3453{
3454 if (rule_is_hidden(rule)) {
3455 return;
3456 }
3457
3458 if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
3459 return;
3460 }
3461
3462 send_flow_removed(p, rule, time_msec(), OFPRR_DELETE);
3463 rule_remove(p, rule);
3464}
3465\f
064af421
BP
3466static int
3467handle_flow_mod(struct ofproto *p, struct ofconn *ofconn,
3468 struct ofp_flow_mod *ofm)
3469{
3470 size_t n_actions;
3471 int error;
3472
9deba63b
BP
3473 error = reject_slave_controller(ofconn, &ofm->header);
3474 if (error) {
3475 return error;
3476 }
064af421
BP
3477 error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm,
3478 sizeof *ofm->actions, &n_actions);
3479 if (error) {
3480 return error;
3481 }
3482
49bdc010
JP
3483 /* We do not support the emergency flow cache. It will hopefully
3484 * get dropped from OpenFlow in the near future. */
3485 if (ofm->flags & htons(OFPFF_EMERG)) {
3486 /* There isn't a good fit for an error code, so just state that the
3487 * flow table is full. */
3488 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
3489 }
3490
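    /* An exact-match flow (no wildcard bits set) always takes precedence over
     * wildcarded flows, so force its priority to the maximum. */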
064af421
BP
3491 normalize_match(&ofm->match);
3492 if (!ofm->match.wildcards) {
3493 ofm->priority = htons(UINT16_MAX);
3494 }
3495
3496 error = validate_actions((const union ofp_action *) ofm->actions,
3497 n_actions, p->max_ports);
3498 if (error) {
3499 return error;
3500 }
3501
3502 switch (ntohs(ofm->command)) {
3503 case OFPFC_ADD:
3504 return add_flow(p, ofconn, ofm, n_actions);
3505
3506 case OFPFC_MODIFY:
79eee1eb 3507 return modify_flows_loose(p, ofconn, ofm, n_actions);
064af421
BP
3508
3509 case OFPFC_MODIFY_STRICT:
79eee1eb 3510 return modify_flow_strict(p, ofconn, ofm, n_actions);
064af421
BP
3511
3512 case OFPFC_DELETE:
79eee1eb
BP
3513 delete_flows_loose(p, ofm);
3514 return 0;
064af421
BP
3515
3516 case OFPFC_DELETE_STRICT:
79eee1eb
BP
3517 delete_flow_strict(p, ofm);
3518 return 0;
064af421
BP
3519
3520 default:
3521 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
3522 }
3523}
3524
659586ef
JG
3525static int
3526handle_tun_id_from_cookie(struct ofproto *p, struct nxt_tun_id_cookie *msg)
3527{
3528 int error;
3529
3530 error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg);
3531 if (error) {
3532 return error;
3533 }
3534
3535 p->tun_id_from_cookie = !!msg->set;
3536 return 0;
3537}
3538
9deba63b
BP
3539static int
3540handle_role_request(struct ofproto *ofproto,
3541 struct ofconn *ofconn, struct nicira_header *msg)
3542{
3543 struct nx_role_request *nrr;
3544 struct nx_role_request *reply;
3545 struct ofpbuf *buf;
3546 uint32_t role;
3547
3548 if (ntohs(msg->header.length) != sizeof *nrr) {
100e95db 3549 VLOG_WARN_RL(&rl, "received role request of length %u (expected %zu)",
9deba63b
BP
3550 ntohs(msg->header.length), sizeof *nrr);
3551 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3552 }
3553 nrr = (struct nx_role_request *) msg;
3554
3555 if (ofconn->type != OFCONN_CONTROLLER) {
3556 VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
3557 "connection");
3558 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
3559 }
3560
3561 role = ntohl(nrr->role);
3562 if (role != NX_ROLE_OTHER && role != NX_ROLE_MASTER
3563 && role != NX_ROLE_SLAVE) {
3564 VLOG_WARN_RL(&rl, "received request for unknown role %"PRIu32, role);
3565
3566 /* There's no good error code for this. */
3567 return ofp_mkerr(OFPET_BAD_REQUEST, -1);
3568 }
3569
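    /* At most one connection may hold the master role at a time, so a new
     * master demotes any existing master to slave. */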
3570 if (role == NX_ROLE_MASTER) {
3571 struct ofconn *other;
3572
3573 HMAP_FOR_EACH (other, struct ofconn, hmap_node,
3574 &ofproto->controllers) {
3575 if (other->role == NX_ROLE_MASTER) {
3576 other->role = NX_ROLE_SLAVE;
3577 }
3578 }
3579 }
3580 ofconn->role = role;
3581
3582 reply = make_openflow_xid(sizeof *reply, OFPT_VENDOR, msg->header.xid,
3583 &buf);
3584 reply->nxh.vendor = htonl(NX_VENDOR_ID);
3585 reply->nxh.subtype = htonl(NXT_ROLE_REPLY);
3586 reply->role = htonl(role);
3587 queue_tx(buf, ofconn, ofconn->reply_counter);
3588
3589 return 0;
3590}
3591
064af421
BP
3592static int
3593handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg)
3594{
3595 struct ofp_vendor_header *ovh = msg;
3596 struct nicira_header *nh;
3597
3598 if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) {
100e95db 3599 VLOG_WARN_RL(&rl, "received vendor message of length %u "
659586ef
JG
3600 "(expected at least %zu)",
3601 ntohs(ovh->header.length), sizeof(struct ofp_vendor_header));
49bdc010 3602 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
3603 }
3604 if (ovh->vendor != htonl(NX_VENDOR_ID)) {
3605 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR);
3606 }
3607 if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) {
100e95db 3608 VLOG_WARN_RL(&rl, "received Nicira vendor message of length %u "
659586ef
JG
3609 "(expected at least %zu)",
3610 ntohs(ovh->header.length), sizeof(struct nicira_header));
49bdc010 3611 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
064af421
BP
3612 }
3613
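    /* The length checks above guarantee that a complete Nicira header is
     * present, so it is safe to reinterpret the message below. */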
3614 nh = msg;
3615 switch (ntohl(nh->subtype)) {
3616 case NXT_STATUS_REQUEST:
3617 return switch_status_handle_request(p->switch_status, ofconn->rconn,
3618 msg);
659586ef
JG
3619
3620 case NXT_TUN_ID_FROM_COOKIE:
3621 return handle_tun_id_from_cookie(p, msg);
9deba63b
BP
3622
3623 case NXT_ROLE_REQUEST:
3624 return handle_role_request(p, ofconn, msg);
064af421
BP
3625 }
3626
3627 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE);
3628}
3629
246e61ea
JP
3630static int
3631handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh)
3632{
3633 struct ofp_header *ob;
3634 struct ofpbuf *buf;
3635
3636 /* Currently, everything executes synchronously, so we can just
3637 * immediately send the barrier reply. */
3638 ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
3639 queue_tx(buf, ofconn, ofconn->reply_counter);
3640 return 0;
3641}
3642
064af421
BP
3643static void
3644handle_openflow(struct ofconn *ofconn, struct ofproto *p,
3645 struct ofpbuf *ofp_msg)
3646{
3647 struct ofp_header *oh = ofp_msg->data;
3648 int error;
3649
3650 COVERAGE_INC(ofproto_recv_openflow);
3651 switch (oh->type) {
3652 case OFPT_ECHO_REQUEST:
3653 error = handle_echo_request(ofconn, oh);
3654 break;
3655
3656 case OFPT_ECHO_REPLY:
3657 error = 0;
3658 break;
3659
3660 case OFPT_FEATURES_REQUEST:
3661 error = handle_features_request(p, ofconn, oh);
3662 break;
3663
3664 case OFPT_GET_CONFIG_REQUEST:
3665 error = handle_get_config_request(p, ofconn, oh);
3666 break;
3667
3668 case OFPT_SET_CONFIG:
3669 error = handle_set_config(p, ofconn, ofp_msg->data);
3670 break;
3671
3672 case OFPT_PACKET_OUT:
3673 error = handle_packet_out(p, ofconn, ofp_msg->data);
3674 break;
3675
3676 case OFPT_PORT_MOD:
9deba63b 3677 error = handle_port_mod(p, ofconn, oh);
064af421
BP
3678 break;
3679
3680 case OFPT_FLOW_MOD:
3681 error = handle_flow_mod(p, ofconn, ofp_msg->data);
3682 break;
3683
3684 case OFPT_STATS_REQUEST:
3685 error = handle_stats_request(p, ofconn, oh);
3686 break;
3687
3688 case OFPT_VENDOR:
3689 error = handle_vendor(p, ofconn, ofp_msg->data);
3690 break;
3691
246e61ea
JP
3692 case OFPT_BARRIER_REQUEST:
3693 error = handle_barrier_request(ofconn, oh);
3694 break;
3695
064af421
BP
3696 default:
3697 if (VLOG_IS_WARN_ENABLED()) {
3698 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
3699 VLOG_WARN_RL(&rl, "OpenFlow message ignored: %s", s);
3700 free(s);
3701 }
3702 error = ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
3703 break;
3704 }
3705
3706 if (error) {
3707 send_error_oh(ofconn, ofp_msg->data, error);
3708 }
3709}
3710\f
3711static void
72b06300 3712handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
064af421
BP
3713{
3714 struct odp_msg *msg = packet->data;
064af421
BP
3715 struct rule *rule;
3716 struct ofpbuf payload;
3717 flow_t flow;
3718
064af421
BP
3719 payload.data = msg + 1;
3720 payload.size = msg->length - sizeof *msg;
659586ef 3721 flow_extract(&payload, msg->arg, msg->port, &flow);
064af421 3722
0ad9b732
JP
3723 /* Check with in-band control to see if this packet should be sent
3724 * to the local port regardless of the flow table. */
3725 if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
3726 union odp_action action;
3727
3728 memset(&action, 0, sizeof(action));
3729 action.output.type = ODPAT_OUTPUT;
3730 action.output.port = ODPP_LOCAL;
f1acd62b 3731 dpif_execute(p->dpif, flow.in_port, &action, 1, &payload);
0ad9b732
JP
3732 }
3733
064af421
BP
3734 rule = lookup_valid_rule(p, &flow);
3735 if (!rule) {
3736 /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
3737 struct ofport *port = port_array_get(&p->ports, msg->port);
3738 if (port) {
3739 if (port->opp.config & OFPPC_NO_PACKET_IN) {
3740 COVERAGE_INC(ofproto_no_packet_in);
3741 /* XXX install 'drop' flow entry */
3742 ofpbuf_delete(packet);
3743 return;
3744 }
3745 } else {
3746 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
3747 }
3748
3749 COVERAGE_INC(ofproto_packet_in);
76ce9432 3750 send_packet_in(p, packet);
064af421
BP
3751 return;
3752 }
3753
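    /* A wildcarded rule cannot be installed in the datapath as-is, so create
     * an exact-match subrule for this particular flow and compose its
     * actions. */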
3754 if (rule->cr.wc.wildcards) {
3755 rule = rule_create_subrule(p, rule, &flow);
3756 rule_make_actions(p, rule, packet);
3757 } else {
3758 if (!rule->may_install) {
3759 /* The rule is not installable, that is, we need to process every
3760 * packet, so process the current packet and set its actions on
3761 * this exact-match rule directly. */
3762 rule_make_actions(p, rule, packet);
3763 } else {
3764 /* XXX revalidate rule if it needs it */
3765 }
3766 }
3767
3768 rule_execute(p, rule, &payload, &flow);
3769 rule_reinstall(p, rule);
7778bd15 3770
76ce9432 3771 if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY) {
7778bd15
BP
3772 /*
3773 * Extra-special case for fail-open mode.
3774 *
3775 * We are in fail-open mode and the packet matched the fail-open rule,
3776 * but we are connected to a controller too. We should send the packet
3777 * up to the controller in the hope that it will try to set up a flow
3778 * and thereby allow us to exit fail-open.
3779 *
3780 * See the top-level comment in fail-open.c for more information.
3781 */
76ce9432 3782 send_packet_in(p, packet);
7778bd15
BP
3783 } else {
3784 ofpbuf_delete(packet);
3785 }
064af421 3786}
72b06300
BP
3787
3788static void
3789handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
3790{
3791 struct odp_msg *msg = packet->data;
3792
3793 switch (msg->type) {
3794 case _ODPL_ACTION_NR:
3795 COVERAGE_INC(ofproto_ctlr_action);
76ce9432 3796 send_packet_in(p, packet);
72b06300
BP
3797 break;
3798
3799 case _ODPL_SFLOW_NR:
3800 if (p->sflow) {
3801 ofproto_sflow_received(p->sflow, msg);
3802 }
3803 ofpbuf_delete(packet);
3804 break;
3805
3806 case _ODPL_MISS_NR:
3807 handle_odp_miss_msg(p, packet);
3808 break;
3809
3810 default:
3811 VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
3812 msg->type);
3813 break;
3814 }
3815}
064af421
BP
3816\f
3817static void
3818revalidate_cb(struct cls_rule *sub_, void *cbdata_)
3819{
3820 struct rule *sub = rule_from_cls_rule(sub_);
3821 struct revalidate_cbdata *cbdata = cbdata_;
3822
3823 if (cbdata->revalidate_all
3824 || (cbdata->revalidate_subrules && sub->super)
3825 || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
3826 revalidate_rule(cbdata->ofproto, sub);
3827 }
3828}
3829
3830static bool
3831revalidate_rule(struct ofproto *p, struct rule *rule)
3832{
3833 const flow_t *flow = &rule->cr.flow;
3834
3835 COVERAGE_INC(ofproto_revalidate_rule);
3836 if (rule->super) {
3837 struct rule *super;
3838 super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
3839 if (!super) {
3840 rule_remove(p, rule);
3841 return false;
3842 } else if (super != rule->super) {
3843 COVERAGE_INC(ofproto_revalidate_moved);
3844 list_remove(&rule->list);
3845 list_push_back(&super->list, &rule->list);
3846 rule->super = super;
3847 rule->hard_timeout = super->hard_timeout;
3848 rule->idle_timeout = super->idle_timeout;
3849 rule->created = super->created;
3850 rule->used = 0;
3851 }
3852 }
3853
3854 rule_update_actions(p, rule);
3855 return true;
3856}
3857
3858static struct ofpbuf *
659586ef
JG
3859compose_flow_removed(struct ofproto *p, const struct rule *rule,
3860 long long int now, uint8_t reason)
064af421 3861{
ca069229 3862 struct ofp_flow_removed *ofr;
064af421 3863 struct ofpbuf *buf;
9ca76894 3864 long long int tdiff = now - rule->created;
26c3f94a
JP
3865 uint32_t sec = tdiff / 1000;
3866 uint32_t msec = tdiff - (sec * 1000);
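    /* Worked example: a flow created 2500 ms ago yields sec = 2 and
     * msec = 500, reported below as duration_sec = 2 and
     * duration_nsec = 500000000. */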
064af421 3867
ca069229 3868 ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
659586ef
JG
3869 flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->tun_id_from_cookie,
3870 &ofr->match);
39997502 3871 ofr->cookie = rule->flow_cookie;
ca069229
JP
3872 ofr->priority = htons(rule->cr.priority);
3873 ofr->reason = reason;
26c3f94a
JP
3874 ofr->duration_sec = htonl(sec);
3875 ofr->duration_nsec = htonl(msec * 1000000);
ca069229
JP
3876 ofr->idle_timeout = htons(rule->idle_timeout);
3877 ofr->packet_count = htonll(rule->packet_count);
3878 ofr->byte_count = htonll(rule->byte_count);
064af421
BP
3879
3880 return buf;
3881}
3882
3883static void
ca069229
JP
3884uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
3885{
3886 assert(rule->installed);
3887 assert(!rule->cr.wc.wildcards);
3888
3889 if (rule->super) {
3890 rule_remove(ofproto, rule);
3891 } else {
3892 rule_uninstall(ofproto, rule);
3893 }
3894}
9deba63b 3895
ca069229
JP
3896static void
3897send_flow_removed(struct ofproto *p, struct rule *rule,
3898 long long int now, uint8_t reason)
064af421
BP
3899{
3900 struct ofconn *ofconn;
3901 struct ofconn *prev;
b9b0ce61 3902 struct ofpbuf *buf = NULL;
064af421
BP
3903
3904 /* We limit the maximum number of queued flow expirations by accounting
3905 * them under the counter for replies. That works because preventing
3906 * OpenFlow requests from being processed also prevents new flows from
3907 * being added (and expiring). (It also prevents processing OpenFlow
3908 * requests that would not add new flows, so it is imperfect.) */
3909
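    /* The 'prev' bookkeeping below composes the message once and clones it
     * for every interested connection except the last, which is handed the
     * original buffer, saving one copy. */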
3910 prev = NULL;
3911 LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
9deba63b 3912 if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)
c91248b3 3913 && ofconn_receives_async_msgs(ofconn)) {
064af421 3914 if (prev) {
431d8ad2 3915 queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
064af421 3916 } else {
659586ef 3917 buf = compose_flow_removed(p, rule, now, reason);
064af421
BP
3918 }
3919 prev = ofconn;
3920 }
3921 }
3922 if (prev) {
431d8ad2 3923 queue_tx(buf, prev, prev->reply_counter);
064af421
BP
3924 }
3925}
3926
064af421
BP
3927
3928static void
3929expire_rule(struct cls_rule *cls_rule, void *p_)
3930{
3931 struct ofproto *p = p_;
3932 struct rule *rule = rule_from_cls_rule(cls_rule);
3933 long long int hard_expire, idle_expire, expire, now;
3934
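    /* Timeouts are configured in seconds, while time_msec() and the 'created'
     * and 'used' timestamps are in milliseconds, hence the factor of 1000. */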
3935 hard_expire = (rule->hard_timeout
3936 ? rule->created + rule->hard_timeout * 1000
3937 : LLONG_MAX);
3938 idle_expire = (rule->idle_timeout
3939 && (rule->super || list_is_empty(&rule->list))
3940 ? rule->used + rule->idle_timeout * 1000
3941 : LLONG_MAX);
3942 expire = MIN(hard_expire, idle_expire);
064af421
BP
3943
3944 now = time_msec();
3945 if (now < expire) {
3946 if (rule->installed && now >= rule->used + 5000) {
3947 uninstall_idle_flow(p, rule);
0193b2af
JG
3948 } else if (!rule->cr.wc.wildcards) {
3949 active_timeout(p, rule);
064af421 3950 }
0193b2af 3951
064af421
BP
3952 return;
3953 }
3954
3955 COVERAGE_INC(ofproto_expired);
46d6f36f
JG
3956
3957 /* Update stats. This code will be a no-op if the rule expired
3958 * due to an idle timeout. */
064af421 3959 if (rule->cr.wc.wildcards) {
064af421
BP
3960 struct rule *subrule, *next;
3961 LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
3962 rule_remove(p, subrule);
3963 }
46d6f36f
JG
3964 } else {
3965 rule_uninstall(p, rule);
064af421
BP
3966 }
3967
8fe1a59d 3968 if (!rule_is_hidden(rule)) {
ca069229
JP
3969 send_flow_removed(p, rule, now,
3970 (now >= hard_expire
3971 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
8fe1a59d 3972 }
064af421
BP
3973 rule_remove(p, rule);
3974}
3975
0193b2af
JG
3976static void
3977active_timeout(struct ofproto *ofproto, struct rule *rule)
3978{
3979 if (ofproto->netflow && !is_controller_rule(rule) &&
3980 netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
3981 struct ofexpired expired;
3982 struct odp_flow odp_flow;
3983
3984 /* Get updated flow stats. */
3985 memset(&odp_flow, 0, sizeof odp_flow);
094e1514
JG
3986 if (rule->installed) {
3987 odp_flow.key = rule->cr.flow;
3988 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
d65349ea 3989 dpif_flow_get(ofproto->dpif, &odp_flow);
094e1514
JG
3990
3991 if (odp_flow.stats.n_packets) {
3992 update_time(ofproto, rule, &odp_flow.stats);
3993 netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
3994 odp_flow.stats.tcp_flags);
3995 }
0193b2af
JG
3996 }
3997
3998 expired.flow = rule->cr.flow;
3999 expired.packet_count = rule->packet_count +
4000 odp_flow.stats.n_packets;
4001 expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
4002 expired.used = rule->used;
4003
4004 netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
4005
4006 /* Schedule us to send the accumulated records once we have
4007 * collected all of them. */
4008 poll_immediate_wake();
4009 }
4010}
4011
064af421
BP
4012static void
4013update_used(struct ofproto *p)
4014{
4015 struct odp_flow *flows;
4016 size_t n_flows;
4017 size_t i;
4018 int error;
4019
c228a364 4020 error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
064af421
BP
4021 if (error) {
4022 return;
4023 }
4024
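    /* Walk every flow installed in the datapath: a flow that no longer
     * corresponds to an installed rule is stale and is deleted; otherwise its
     * statistics are folded back into the rule. */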
4025 for (i = 0; i < n_flows; i++) {
4026 struct odp_flow *f = &flows[i];
4027 struct rule *rule;
4028
4029 rule = rule_from_cls_rule(
4030 classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
4031 if (!rule || !rule->installed) {
4032 COVERAGE_INC(ofproto_unexpected_rule);
c228a364 4033 dpif_flow_del(p->dpif, f);
064af421
BP
4034 continue;
4035 }
4036
0193b2af 4037 update_time(p, rule, &f->stats);
064af421
BP
4038 rule_account(p, rule, f->stats.n_bytes);
4039 }
4040 free(flows);
4041}
4042
43253595 4043/* pinsched callback for sending 'packet' on 'ofconn'. */
064af421 4044static void
76ce9432 4045do_send_packet_in(struct ofpbuf *packet, void *ofconn_)
064af421 4046{
76ce9432 4047 struct ofconn *ofconn = ofconn_;
43253595
BP
4048
4049 rconn_send_with_limit(ofconn->rconn, packet,
4050 ofconn->packet_in_counter, 100);
4051}
4052
4053/* Takes 'packet', which has been converted with do_convert_to_packet_in(),
4054 * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s
4055 * packet scheduler for sending.
4056 *
30ea5d93
BP
4057 * 'max_len' specifies the maximum number of bytes of the packet to send on
4058 * 'ofconn' (INT_MAX specifies no limit).
4059 *
43253595
BP
4060 * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise,
4061 * ownership is transferred to this function. */
4062static void
30ea5d93
BP
4063schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len,
4064 bool clone)
43253595 4065{
76ce9432 4066 struct ofproto *ofproto = ofconn->ofproto;
43253595
BP
4067 struct ofp_packet_in *opi = packet->data;
4068 uint16_t in_port = ofp_port_to_odp_port(ntohs(opi->in_port));
4069 int send_len, trim_size;
76ce9432 4070 uint32_t buffer_id;
064af421 4071
43253595
BP
4072 /* Get buffer. */
4073 if (opi->reason == OFPR_ACTION) {
76ce9432 4074 buffer_id = UINT32_MAX;
43253595
BP
4075 } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
4076 buffer_id = pktbuf_get_null();
89b9612d
BP
4077 } else if (!ofconn->pktbuf) {
4078 buffer_id = UINT32_MAX;
76ce9432 4079 } else {
43253595
BP
4080 struct ofpbuf payload;
4081 payload.data = opi->data;
4082 payload.size = packet->size - offsetof(struct ofp_packet_in, data);
4083 buffer_id = pktbuf_save(ofconn->pktbuf, &payload, in_port);
76ce9432 4084 }
372179d4 4085
43253595
BP
4086 /* Figure out how much of the packet to send. */
4087 send_len = ntohs(opi->total_len);
4088 if (buffer_id != UINT32_MAX) {
4089 send_len = MIN(send_len, ofconn->miss_send_len);
4090 }
30ea5d93 4091 send_len = MIN(send_len, max_len);
064af421 4092
43253595
BP
4093 /* Adjust packet length and clone if necessary. */
4094 trim_size = offsetof(struct ofp_packet_in, data) + send_len;
4095 if (clone) {
4096 packet = ofpbuf_clone_data(packet->data, trim_size);
4097 opi = packet->data;
4098 } else {
4099 packet->size = trim_size;
4100 }
4101
4102 /* Update packet headers. */
4103 opi->buffer_id = htonl(buffer_id);
4104 update_openflow_length(packet);
4105
4106 /* Hand over to packet scheduler. It might immediately call into
4107 * do_send_packet_in() or it might buffer it for a while (until a later
4108 * call to pinsched_run()). */
4109 pinsched_send(ofconn->schedulers[opi->reason], in_port,
4110 packet, do_send_packet_in, ofconn);
064af421
BP
4111}
4112
43253595
BP
4113/* Replaces the struct odp_msg header in 'packet' with an equivalent struct
4114 * ofp_packet_in. 'packet' must have sufficient headroom to do so (e.g. as
4115 * returned by dpif_recv()).
4116 *
4117 * The conversion is not complete: the caller still needs to trim any unneeded
4118 * payload off the end of the buffer, set the length in the OpenFlow header,
4119 * and set buffer_id. Those require us to know the controller settings and so
30ea5d93
BP
4120 * must be done on a per-controller basis.
4121 *
4122 * Returns the maximum number of bytes of the packet that should be sent to
4123 * the controller (INT_MAX if no limit). */
4124static int
43253595 4125do_convert_to_packet_in(struct ofpbuf *packet)
064af421 4126{
76ce9432 4127 struct odp_msg *msg = packet->data;
43253595
BP
4128 struct ofp_packet_in *opi;
4129 uint8_t reason;
4130 uint16_t total_len;
4131 uint16_t in_port;
30ea5d93 4132 int max_len;
43253595
BP
4133
4134 /* Extract relevant header fields */
30ea5d93
BP
4135 if (msg->type == _ODPL_ACTION_NR) {
4136 reason = OFPR_ACTION;
4137 max_len = msg->arg;
4138 } else {
4139 reason = OFPR_NO_MATCH;
4140 max_len = INT_MAX;
4141 }
43253595
BP
4142 total_len = msg->length - sizeof *msg;
4143 in_port = odp_port_to_ofp_port(msg->port);
4144
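    /* Note: the odp_msg header is stripped from the front of the buffer and
     * the ofp_packet_in header is pushed into the space that frees up (plus
     * any headroom left by dpif_recv()), so the payload itself is not
     * copied. */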
4145 /* Repurpose packet buffer by overwriting header. */
4146 ofpbuf_pull(packet, sizeof(struct odp_msg));
4147 opi = ofpbuf_push_zeros(packet, offsetof(struct ofp_packet_in, data));
4148 opi->header.version = OFP_VERSION;
4149 opi->header.type = OFPT_PACKET_IN;
4150 opi->total_len = htons(total_len);
4151 opi->in_port = htons(in_port);
4152 opi->reason = reason;
30ea5d93
BP
4153
4154 return max_len;
43253595
BP
4155}
4156
4157/* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or
4158 * _ODPL_MISS_NR, sends an OFPT_PACKET_IN message to each OpenFlow controller
4159 * as necessary according to their individual configurations.
4160 *
4161 * 'packet' must have sufficient headroom to convert it into a struct
4162 * ofp_packet_in (e.g. as returned by dpif_recv()).
4163 *
4164 * Takes ownership of 'packet'. */
4165static void
4166send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet)
4167{
76ce9432 4168 struct ofconn *ofconn, *prev;
30ea5d93 4169 int max_len;
064af421 4170
30ea5d93 4171 max_len = do_convert_to_packet_in(packet);
76ce9432
BP
4172
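    /* As in send_flow_removed(), the 'prev' bookkeeping clones 'packet' for
     * every receiver except the last, which takes ownership of the original
     * buffer. */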
4173 prev = NULL;
4174 LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) {
c91248b3 4175 if (ofconn_receives_async_msgs(ofconn)) {
9deba63b 4176 if (prev) {
30ea5d93 4177 schedule_packet_in(prev, packet, max_len, true);
9deba63b
BP
4178 }
4179 prev = ofconn;
064af421 4180 }
76ce9432
BP
4181 }
4182 if (prev) {
30ea5d93 4183 schedule_packet_in(prev, packet, max_len, false);
76ce9432
BP
4184 } else {
4185 ofpbuf_delete(packet);
064af421 4186 }
064af421
BP
4187}
4188
4189static uint64_t
fa60c019 4190pick_datapath_id(const struct ofproto *ofproto)
064af421 4191{
fa60c019 4192 const struct ofport *port;
064af421 4193
fa60c019
BP
4194 port = port_array_get(&ofproto->ports, ODPP_LOCAL);
4195 if (port) {
4196 uint8_t ea[ETH_ADDR_LEN];
4197 int error;
4198
4199 error = netdev_get_etheraddr(port->netdev, ea);
064af421
BP
4200 if (!error) {
4201 return eth_addr_to_uint64(ea);
4202 }
4203 VLOG_WARN("could not get MAC address for %s (%s)",
fa60c019 4204 netdev_get_name(port->netdev), strerror(error));
064af421 4205 }
fa60c019 4206 return ofproto->fallback_dpid;
064af421
BP
4207}
4208
4209static uint64_t
4210pick_fallback_dpid(void)
4211{
4212 uint8_t ea[ETH_ADDR_LEN];
70150daf 4213 eth_addr_nicira_random(ea);
064af421
BP
4214 return eth_addr_to_uint64(ea);
4215}
4216\f
4217static bool
4218default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
4219 struct odp_actions *actions, tag_type *tags,
6a07af36 4220 uint16_t *nf_output_iface, void *ofproto_)
064af421
BP
4221{
4222 struct ofproto *ofproto = ofproto_;
4223 int out_port;
4224
4225 /* Drop frames for reserved multicast addresses. */
4226 if (eth_addr_is_reserved(flow->dl_dst)) {
4227 return true;
4228 }
4229
4230 /* Learn source MAC (but don't try to learn from revalidation). */
4231 if (packet != NULL) {
4232 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
4233 0, flow->in_port);
4234 if (rev_tag) {
4235 /* The log messages here could actually be useful in debugging,
4236 * so keep the rate limit relatively high. */
4237 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
4238 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
4239 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
4240 ofproto_revalidate(ofproto, rev_tag);
4241 }
4242 }
4243
4244 /* Determine output port. */
4245 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
4246 if (out_port < 0) {
6a07af36 4247 add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
064af421
BP
4248 } else if (out_port != flow->in_port) {
4249 odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
6a07af36 4250 *nf_output_iface = out_port;
064af421
BP
4251 } else {
4252 /* Drop. */
4253 }
4254
4255 return true;
4256}
4257
4258static const struct ofhooks default_ofhooks = {
4259 NULL,
4260 default_normal_ofhook_cb,
4261 NULL,
4262 NULL
4263};