/*
 * Copyright (c) 2009, 2010, 2011 Nicira Networks.
 * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "ofproto.h"
#include <errno.h>
#include <inttypes.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdlib.h>
#include "byte-order.h"
#include "classifier.h"
#include "coverage.h"
#include "discovery.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "hash.h"
#include "hmap.h"
#include "in-band.h"
#include "mac-learning.h"
#include "multipath.h"
#include "netdev.h"
#include "netflow.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofp-util.h"
#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/datapath-protocol.h"
#include "packets.h"
#include "pinsched.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "rconn.h"
#include "shash.h"
#include "status.h"
#include "stream-ssl.h"
#include "svec.h"
#include "tag.h"
#include "timeval.h"
#include "unixctl.h"
#include "vconn.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto);

COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(odp_overflow);
COVERAGE_DEFINE(ofproto_agg_request);
COVERAGE_DEFINE(ofproto_costly_flags);
COVERAGE_DEFINE(ofproto_ctlr_action);
COVERAGE_DEFINE(ofproto_del_rule);
COVERAGE_DEFINE(ofproto_error);
COVERAGE_DEFINE(ofproto_expiration);
COVERAGE_DEFINE(ofproto_expired);
COVERAGE_DEFINE(ofproto_flows_req);
COVERAGE_DEFINE(ofproto_flush);
COVERAGE_DEFINE(ofproto_invalidated);
COVERAGE_DEFINE(ofproto_no_packet_in);
COVERAGE_DEFINE(ofproto_ofconn_stuck);
COVERAGE_DEFINE(ofproto_ofp2odp);
COVERAGE_DEFINE(ofproto_packet_in);
COVERAGE_DEFINE(ofproto_packet_out);
COVERAGE_DEFINE(ofproto_queue_req);
COVERAGE_DEFINE(ofproto_recv_openflow);
COVERAGE_DEFINE(ofproto_reinit_ports);
COVERAGE_DEFINE(ofproto_unexpected_rule);
COVERAGE_DEFINE(ofproto_uninstallable);
COVERAGE_DEFINE(ofproto_update_port);

#include "sflow_api.h"

struct rule;

struct ofport {
    struct hmap_node hmap_node; /* In struct ofproto's "ports" hmap. */
    struct netdev *netdev;
    struct ofp_phy_port opp;    /* In host byte order. */
    uint16_t odp_port;
};

static void ofport_free(struct ofport *);
static void hton_ofp_phy_port(struct ofp_phy_port *);

struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* If nonnull, called just before executing a resubmit action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, const struct rule *);

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with OFPP_NORMAL actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    int last_pop_priority;      /* Offset in 'odp_actions' just past most
                                 * recently added ODPAT_SET_PRIORITY. */
};

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto *, const struct flow *,
                                  const struct ofpbuf *);
static struct ofpbuf *xlate_actions(struct action_xlate_ctx *,
                                    const union ofp_action *in, size_t n_in);

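/* Illustrative sketch, not part of the original source: the usual way to
 * drive the translation context declared above, mirroring what
 * ofproto_send_packet() does later in this file.  Error handling and the
 * optional 'resubmit_hook' are omitted.
 *
 *     struct action_xlate_ctx ctx;
 *     struct ofpbuf *odp_actions;
 *
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, packet);
 *     odp_actions = xlate_actions(&ctx, ofp_actions, n_ofp_actions);
 *     ...use odp_actions->data and odp_actions->size, e.g. with
 *        dpif_execute()...
 *     ofpbuf_delete(odp_actions);
 *
 * After xlate_actions() returns, 'ctx.tags', 'ctx.may_set_up_flow' and
 * 'ctx.nf_output_iface' describe the translated actions. */
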
/* An OpenFlow flow. */
struct rule {
    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Creation time. */

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any facet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    ovs_be64 flow_cookie;       /* Controller-issued identifier. */

    struct cls_rule cr;         /* In owning ofproto's classifier. */
    uint16_t idle_timeout;      /* In seconds from time of last use. */
    uint16_t hard_timeout;      /* In seconds from time of creation. */
    bool send_flow_removed;     /* Send a flow removed message? */
    int n_actions;              /* Number of elements in actions[]. */
    union ofp_action *actions;  /* OpenFlow actions. */
    struct list facets;         /* List of "struct facet"s. */
};

static struct rule *rule_from_cls_rule(const struct cls_rule *);
static bool rule_is_hidden(const struct rule *);

static struct rule *rule_create(const struct cls_rule *,
                                const union ofp_action *, size_t n_actions,
                                uint16_t idle_timeout, uint16_t hard_timeout,
                                ovs_be64 flow_cookie, bool send_flow_removed);
static void rule_destroy(struct ofproto *, struct rule *);
static void rule_free(struct rule *);

static struct rule *rule_lookup(struct ofproto *, const struct flow *);
static void rule_insert(struct ofproto *, struct rule *);
static void rule_remove(struct ofproto *, struct rule *);

static void rule_send_removed(struct ofproto *, struct rule *, uint8_t reason);

/* An exact-match instantiation of an OpenFlow flow. */
struct facet {
    long long int used;         /* Time last used; time created if not used. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a flow was deleted (e.g. dpif_flow_del()) or when its
     *     statistics were reset (e.g. dpif_flow_put() with ODPPF_ZERO_STATS).
     *
     *   - Do not include any packets or bytes that can currently be obtained
     *     from the datapath by, e.g., dpif_flow_get().
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    /* Number of bytes passed to account_cb.  This may include bytes that can
     * currently be obtained from the datapath (thus, it can be greater than
     * byte_count). */
    uint64_t accounted_bytes;

    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule *rule;           /* Owning rule. */
    struct flow flow;            /* Exact-match flow. */
    bool installed;              /* Installed in datapath? */
    bool may_install;            /* True ordinarily; false if actions must
                                  * be reassessed for every packet. */
    size_t actions_len;          /* Number of bytes in actions[]. */
    struct nlattr *actions;      /* Datapath actions. */
    tag_type tags;               /* Tags (set only by hooks). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
};

static struct facet *facet_create(struct ofproto *, struct rule *,
                                  const struct flow *,
                                  const struct ofpbuf *packet);
static void facet_remove(struct ofproto *, struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_lookup_valid(struct ofproto *, const struct flow *);
static bool facet_revalidate(struct ofproto *, struct facet *);

static void facet_install(struct ofproto *, struct facet *, bool zero_stats);
static void facet_uninstall(struct ofproto *, struct facet *);
static void facet_flush_stats(struct ofproto *, struct facet *);

static void facet_make_actions(struct ofproto *, struct facet *,
                               const struct ofpbuf *packet);
static void facet_update_stats(struct ofproto *, struct facet *,
                               const struct odp_flow_stats *);

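/* Illustrative note, not part of the original source: because of the
 * accounting rules spelled out in 'struct rule' and 'struct facet' above, the
 * three layers of counters are disjoint, so a rule's lifetime totals would be
 * computed roughly as
 *
 *     total_packets = rule->packet_count
 *                     + sum over rule->facets of facet->packet_count
 *                     + whatever the datapath currently reports for each
 *                       installed facet (e.g. via dpif_flow_get());
 *
 * and likewise for bytes. */
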
/* ofproto supports two kinds of OpenFlow connections:
 *
 *   - "Primary" connections to ordinary OpenFlow controllers.  ofproto
 *     maintains persistent connections to these controllers and by default
 *     sends them asynchronous messages such as packet-ins.
 *
 *   - "Service" connections, e.g. from ovs-ofctl.  When these connections
 *     drop, it is the other side's responsibility to reconnect them if
 *     necessary.  ofproto does not send them asynchronous messages by default.
 *
 * Currently, active (tcp, ssl, unix) connections are always "primary"
 * connections and passive (ptcp, pssl, punix) connections are always "service"
 * connections.  There is no inherent reason for this, but it reflects the
 * common case.
 */
enum ofconn_type {
    OFCONN_PRIMARY,             /* An ordinary OpenFlow controller. */
    OFCONN_SERVICE              /* A service connection, e.g. "ovs-ofctl". */
};

/* A listener for incoming OpenFlow "service" connections. */
struct ofservice {
    struct hmap_node node;      /* In struct ofproto's "services" hmap. */
    struct pvconn *pvconn;      /* OpenFlow connection listener. */

    /* These are not used by ofservice directly.  They are settings for
     * accepted "struct ofconn"s from the pvconn. */
    int probe_interval;         /* Max idle time before probing, in seconds. */
    int rate_limit;             /* Max packet-in rate in packets per second. */
    int burst_limit;            /* Limit on accumulating packet credits. */
};

static struct ofservice *ofservice_lookup(struct ofproto *,
                                          const char *target);
static int ofservice_create(struct ofproto *,
                            const struct ofproto_controller *);
static void ofservice_reconfigure(struct ofservice *,
                                  const struct ofproto_controller *);
static void ofservice_destroy(struct ofproto *, struct ofservice *);

/* An OpenFlow connection. */
struct ofconn {
    struct ofproto *ofproto;    /* The ofproto that owns this connection. */
    struct list node;           /* In struct ofproto's "all_conns" list. */
    struct rconn *rconn;        /* OpenFlow connection. */
    enum ofconn_type type;      /* Type. */
    enum nx_flow_format flow_format; /* Currently selected flow format. */

    /* OFPT_PACKET_IN related data. */
    struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
    struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */
    struct pktbuf *pktbuf;      /* OpenFlow packet buffers. */
    int miss_send_len;          /* Bytes to send of buffered packets. */

    /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow
     * requests, and the maximum number before we stop reading OpenFlow
     * requests. */
#define OFCONN_REPLY_MAX 100
    struct rconn_packet_counter *reply_counter;

    /* type == OFCONN_PRIMARY only. */
    enum nx_role role;           /* Role. */
    struct hmap_node hmap_node;  /* In struct ofproto's "controllers" map. */
    struct discovery *discovery; /* Controller discovery object, if enabled. */
    struct status_category *ss;  /* Switch status category. */
    enum ofproto_band band;      /* In-band or out-of-band? */
};

/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's
 * "schedulers" array.  Their values are 0 and 1, and their meanings and values
 * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient.  In
 * case anything ever changes, check their values here. */
#define N_SCHEDULERS 2
BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0);
BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR);
BUILD_ASSERT_DECL(OFPR_ACTION == 1);
BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR);

static struct ofconn *ofconn_create(struct ofproto *, struct rconn *,
                                    enum ofconn_type);
static void ofconn_destroy(struct ofconn *);
static void ofconn_run(struct ofconn *);
static void ofconn_wait(struct ofconn *);
static bool ofconn_receives_async_msgs(const struct ofconn *);
static char *ofconn_make_name(const struct ofproto *, const char *target);
static void ofconn_set_rate_limit(struct ofconn *, int rate, int burst);

static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
                     struct rconn_packet_counter *counter);

static void send_packet_in(struct ofproto *, struct dpif_upcall *,
                           const struct flow *, bool clone);
static void do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn);

struct ofproto {
    /* Settings. */
    uint64_t datapath_id;       /* Datapath ID. */
    uint64_t fallback_dpid;     /* Datapath ID if no better choice found. */
    char *mfr_desc;             /* Manufacturer. */
    char *hw_desc;              /* Hardware. */
    char *sw_desc;              /* Software version. */
    char *serial_desc;          /* Serial number. */
    char *dp_desc;              /* Datapath description. */

    /* Datapath. */
    struct dpif *dpif;
    struct netdev_monitor *netdev_monitor;
    struct hmap ports;          /* Contains "struct ofport"s. */
    struct shash port_by_name;
    uint32_t max_ports;

    /* Configuration. */
    struct switch_status *switch_status;
    struct fail_open *fail_open;
    struct netflow *netflow;
    struct ofproto_sflow *sflow;

    /* In-band control. */
    struct in_band *in_band;
    long long int next_in_band_update;
    struct sockaddr_in *extra_in_band_remotes;
    size_t n_extra_remotes;
    int in_band_queue;

    /* Flow table. */
    struct classifier cls;
    long long int next_expiration;

    /* Facets. */
    struct hmap facets;
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* OpenFlow connections. */
    struct hmap controllers;    /* Controller "struct ofconn"s. */
    struct list all_conns;      /* Contains "struct ofconn"s. */
    enum ofproto_fail_mode fail_mode;

    /* OpenFlow listeners. */
    struct hmap services;       /* Contains "struct ofservice"s. */
    struct pvconn **snoops;
    size_t n_snoops;

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;
    void *aux;

    /* Used by default ofhooks. */
    struct mac_learning *ml;
};

/* Map from dpif name to struct ofproto, for use by unixctl commands. */
static struct shash all_ofprotos = SHASH_INITIALIZER(&all_ofprotos);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

static const struct ofhooks default_ofhooks;

static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);

static int ofproto_expire(struct ofproto *);

static void handle_upcall(struct ofproto *, struct dpif_upcall *);

static void handle_openflow(struct ofconn *, struct ofpbuf *);

static struct ofport *get_port(const struct ofproto *, uint16_t odp_port);
static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);

static void ofproto_unixctl_init(void);

int
ofproto_create(const char *datapath, const char *datapath_type,
               const struct ofhooks *ofhooks, void *aux,
               struct ofproto **ofprotop)
{
    struct ofproto *p;
    struct dpif *dpif;
    int error;

    *ofprotop = NULL;

    ofproto_unixctl_init();

    /* Connect to datapath and start listening for messages. */
    error = dpif_open(datapath, datapath_type, &dpif);
    if (error) {
        VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
        return error;
    }
    error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
    if (error) {
        VLOG_ERR("failed to listen on datapath %s: %s",
                 datapath, strerror(error));
        dpif_close(dpif);
        return error;
    }
    dpif_flow_flush(dpif);
    dpif_recv_purge(dpif);

    /* Initialize settings. */
    p = xzalloc(sizeof *p);
    p->fallback_dpid = pick_fallback_dpid();
    p->datapath_id = p->fallback_dpid;
    p->mfr_desc = xstrdup(DEFAULT_MFR_DESC);
    p->hw_desc = xstrdup(DEFAULT_HW_DESC);
    p->sw_desc = xstrdup(DEFAULT_SW_DESC);
    p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC);
    p->dp_desc = xstrdup(DEFAULT_DP_DESC);

    /* Initialize datapath. */
    p->dpif = dpif;
    p->netdev_monitor = netdev_monitor_create();
    hmap_init(&p->ports);
    shash_init(&p->port_by_name);
    p->max_ports = dpif_get_max_ports(dpif);

    /* Initialize submodules. */
    p->switch_status = switch_status_create(p);
    p->fail_open = NULL;
    p->netflow = NULL;
    p->sflow = NULL;

    /* Initialize in-band control. */
    p->in_band = NULL;
    p->in_band_queue = -1;

    /* Initialize flow table. */
    classifier_init(&p->cls);
    p->next_expiration = time_msec() + 1000;

    /* Initialize facet table. */
    hmap_init(&p->facets);
    p->need_revalidate = false;
    tag_set_init(&p->revalidate_set);

    /* Initialize OpenFlow connections. */
    list_init(&p->all_conns);
    hmap_init(&p->controllers);
    hmap_init(&p->services);
    p->snoops = NULL;
    p->n_snoops = 0;

    /* Initialize hooks. */
    if (ofhooks) {
        p->ofhooks = ofhooks;
        p->aux = aux;
        p->ml = NULL;
    } else {
        p->ofhooks = &default_ofhooks;
        p->aux = p;
        p->ml = mac_learning_create();
    }

    /* Pick final datapath ID. */
    p->datapath_id = pick_datapath_id(p);
    VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id);

    shash_add_once(&all_ofprotos, dpif_name(p->dpif), p);

    *ofprotop = p;
    return 0;
}
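
/* Illustrative sketch, not part of the original source: the expected lifecycle
 * of an ofproto as driven by a client such as ovs-vswitchd's bridge code.  The
 * datapath name and type and the 'running' flag are made-up placeholders.
 *
 *     struct ofproto *ofproto;
 *     int error;
 *
 *     error = ofproto_create("dp0", "system", NULL, NULL, &ofproto);
 *     if (!error) {
 *         ofproto_set_controllers(ofproto, controllers, n_controllers);
 *         while (running) {
 *             ofproto_run(ofproto);
 *             ofproto_wait(ofproto);
 *             poll_block();
 *         }
 *         ofproto_destroy(ofproto);
 *     }
 *
 * ofproto_run(), ofproto_wait() and ofproto_destroy() are defined below. */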
519
520void
521ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
522{
523 uint64_t old_dpid = p->datapath_id;
fa60c019 524 p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
064af421 525 if (p->datapath_id != old_dpid) {
b123cc3c 526 VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id);
76ce9432
BP
527
528 /* Force all active connections to reconnect, since there is no way to
529 * notify a controller that the datapath ID has changed. */
fa05809b 530 ofproto_reconnect_controllers(p);
064af421
BP
531 }
532}
533
76ce9432
BP
534static bool
535is_discovery_controller(const struct ofproto_controller *c)
536{
537 return !strcmp(c->target, "discover");
538}
539
540static bool
541is_in_band_controller(const struct ofproto_controller *c)
542{
543 return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND;
544}
545
546/* Creates a new controller in 'ofproto'. Some of the settings are initially
547 * drawn from 'c', but update_controller() needs to be called later to finish
548 * the new ofconn's configuration. */
549static void
550add_controller(struct ofproto *ofproto, const struct ofproto_controller *c)
551{
552 struct discovery *discovery;
553 struct ofconn *ofconn;
554
555 if (is_discovery_controller(c)) {
556 int error = discovery_create(c->accept_re, c->update_resolv_conf,
557 ofproto->dpif, ofproto->switch_status,
558 &discovery);
559 if (error) {
560 return;
561 }
562 } else {
563 discovery = NULL;
564 }
565
5899143f 566 ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_PRIMARY);
76ce9432
BP
567 ofconn->pktbuf = pktbuf_create();
568 ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
569 if (discovery) {
570 ofconn->discovery = discovery;
571 } else {
eb15cdbb
BP
572 char *name = ofconn_make_name(ofproto, c->target);
573 rconn_connect(ofconn->rconn, c->target, name);
574 free(name);
76ce9432
BP
575 }
576 hmap_insert(&ofproto->controllers, &ofconn->hmap_node,
577 hash_string(c->target, 0));
578}
579
580/* Reconfigures 'ofconn' to match 'c'. This function cannot update an ofconn's
581 * target or turn discovery on or off (these are done by creating new ofconns
582 * and deleting old ones), but it can update the rest of an ofconn's
583 * settings. */
584static void
585update_controller(struct ofconn *ofconn, const struct ofproto_controller *c)
064af421 586{
76ce9432 587 int probe_interval;
79c9f2ee 588
d2ede7bc
BP
589 ofconn->band = (is_in_band_controller(c)
590 ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND);
591
76ce9432 592 rconn_set_max_backoff(ofconn->rconn, c->max_backoff);
79c9f2ee 593
76ce9432
BP
594 probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0;
595 rconn_set_probe_interval(ofconn->rconn, probe_interval);
79c9f2ee 596
76ce9432
BP
597 if (ofconn->discovery) {
598 discovery_set_update_resolv_conf(ofconn->discovery,
599 c->update_resolv_conf);
600 discovery_set_accept_controller_re(ofconn->discovery, c->accept_re);
601 }
79c9f2ee 602
7d674866 603 ofconn_set_rate_limit(ofconn, c->rate_limit, c->burst_limit);
76ce9432 604}
79c9f2ee 605
76ce9432
BP
606static const char *
607ofconn_get_target(const struct ofconn *ofconn)
608{
eb15cdbb 609 return ofconn->discovery ? "discover" : rconn_get_target(ofconn->rconn);
76ce9432
BP
610}
611
612static struct ofconn *
613find_controller_by_target(struct ofproto *ofproto, const char *target)
614{
615 struct ofconn *ofconn;
616
4e8e4213 617 HMAP_FOR_EACH_WITH_HASH (ofconn, hmap_node,
76ce9432
BP
618 hash_string(target, 0), &ofproto->controllers) {
619 if (!strcmp(ofconn_get_target(ofconn), target)) {
620 return ofconn;
79c9f2ee 621 }
064af421 622 }
76ce9432
BP
623 return NULL;
624}
064af421 625
d2ede7bc
BP
626static void
627update_in_band_remotes(struct ofproto *ofproto)
628{
629 const struct ofconn *ofconn;
630 struct sockaddr_in *addrs;
917e50e1 631 size_t max_addrs, n_addrs;
d2ede7bc 632 bool discovery;
917e50e1 633 size_t i;
d2ede7bc 634
917e50e1
BP
635 /* Allocate enough memory for as many remotes as we could possibly have. */
636 max_addrs = ofproto->n_extra_remotes + hmap_count(&ofproto->controllers);
637 addrs = xmalloc(max_addrs * sizeof *addrs);
d2ede7bc
BP
638 n_addrs = 0;
639
640 /* Add all the remotes. */
641 discovery = false;
4e8e4213 642 HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) {
d2ede7bc
BP
643 struct sockaddr_in *sin = &addrs[n_addrs];
644
487ec65f
BP
645 if (ofconn->band == OFPROTO_OUT_OF_BAND) {
646 continue;
647 }
648
d2ede7bc
BP
649 sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn);
650 if (sin->sin_addr.s_addr) {
651 sin->sin_port = rconn_get_remote_port(ofconn->rconn);
652 n_addrs++;
653 }
654 if (ofconn->discovery) {
655 discovery = true;
656 }
657 }
917e50e1
BP
658 for (i = 0; i < ofproto->n_extra_remotes; i++) {
659 addrs[n_addrs++] = ofproto->extra_in_band_remotes[i];
660 }
d2ede7bc
BP
661
662 /* Create or update or destroy in-band.
663 *
664 * Ordinarily we only enable in-band if there's at least one remote
665 * address, but discovery needs the in-band rules for DHCP to be installed
666 * even before we know any remote addresses. */
667 if (n_addrs || discovery) {
668 if (!ofproto->in_band) {
669 in_band_create(ofproto, ofproto->dpif, ofproto->switch_status,
670 &ofproto->in_band);
671 }
40cae670
BP
672 if (ofproto->in_band) {
673 in_band_set_remotes(ofproto->in_band, addrs, n_addrs);
674 }
b1da6250 675 in_band_set_queue(ofproto->in_band, ofproto->in_band_queue);
d2ede7bc
BP
676 ofproto->next_in_band_update = time_msec() + 1000;
677 } else {
678 in_band_destroy(ofproto->in_band);
679 ofproto->in_band = NULL;
680 }
681
682 /* Clean up. */
683 free(addrs);
684}
685
31681a5d
JP
686static void
687update_fail_open(struct ofproto *p)
688{
689 struct ofconn *ofconn;
690
691 if (!hmap_is_empty(&p->controllers)
692 && p->fail_mode == OFPROTO_FAIL_STANDALONE) {
693 struct rconn **rconns;
694 size_t n;
695
696 if (!p->fail_open) {
697 p->fail_open = fail_open_create(p, p->switch_status);
698 }
699
700 n = 0;
701 rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns);
4e8e4213 702 HMAP_FOR_EACH (ofconn, hmap_node, &p->controllers) {
31681a5d
JP
703 rconns[n++] = ofconn->rconn;
704 }
705
706 fail_open_set_controllers(p->fail_open, rconns, n);
707 /* p->fail_open takes ownership of 'rconns'. */
708 } else {
709 fail_open_destroy(p->fail_open);
710 p->fail_open = NULL;
711 }
712}
713
76ce9432
BP
714void
715ofproto_set_controllers(struct ofproto *p,
716 const struct ofproto_controller *controllers,
717 size_t n_controllers)
718{
719 struct shash new_controllers;
7d674866
BP
720 struct ofconn *ofconn, *next_ofconn;
721 struct ofservice *ofservice, *next_ofservice;
76ce9432 722 bool ss_exists;
76ce9432 723 size_t i;
79c9f2ee 724
7d674866
BP
725 /* Create newly configured controllers and services.
726 * Create a name to ofproto_controller mapping in 'new_controllers'. */
76ce9432
BP
727 shash_init(&new_controllers);
728 for (i = 0; i < n_controllers; i++) {
729 const struct ofproto_controller *c = &controllers[i];
730
7d674866
BP
731 if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) {
732 if (!find_controller_by_target(p, c->target)) {
733 add_controller(p, c);
734 }
735 } else if (!pvconn_verify_name(c->target)) {
736 if (!ofservice_lookup(p, c->target) && ofservice_create(p, c)) {
737 continue;
738 }
739 } else {
740 VLOG_WARN_RL(&rl, "%s: unsupported controller \"%s\"",
741 dpif_name(p->dpif), c->target);
742 continue;
76ce9432 743 }
7d674866
BP
744
745 shash_add_once(&new_controllers, c->target, &controllers[i]);
76ce9432
BP
746 }
747
7d674866
BP
748 /* Delete controllers that are no longer configured.
749 * Update configuration of all now-existing controllers. */
76ce9432 750 ss_exists = false;
4e8e4213 751 HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) {
76ce9432
BP
752 struct ofproto_controller *c;
753
754 c = shash_find_data(&new_controllers, ofconn_get_target(ofconn));
755 if (!c) {
756 ofconn_destroy(ofconn);
79c9f2ee 757 } else {
76ce9432 758 update_controller(ofconn, c);
76ce9432
BP
759 if (ofconn->ss) {
760 ss_exists = true;
761 }
76ce9432
BP
762 }
763 }
7d674866
BP
764
765 /* Delete services that are no longer configured.
766 * Update configuration of all now-existing services. */
4e8e4213 767 HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
7d674866
BP
768 struct ofproto_controller *c;
769
770 c = shash_find_data(&new_controllers,
771 pvconn_get_name(ofservice->pvconn));
772 if (!c) {
773 ofservice_destroy(p, ofservice);
774 } else {
775 ofservice_reconfigure(ofservice, c);
776 }
777 }
778
76ce9432
BP
779 shash_destroy(&new_controllers);
780
d2ede7bc 781 update_in_band_remotes(p);
31681a5d 782 update_fail_open(p);
79c9f2ee 783
76ce9432
BP
784 if (!hmap_is_empty(&p->controllers) && !ss_exists) {
785 ofconn = CONTAINER_OF(hmap_first(&p->controllers),
786 struct ofconn, hmap_node);
787 ofconn->ss = switch_status_register(p->switch_status, "remote",
788 rconn_status_cb, ofconn->rconn);
79c9f2ee 789 }
064af421
BP
790}
791
31681a5d
JP
792void
793ofproto_set_fail_mode(struct ofproto *p, enum ofproto_fail_mode fail_mode)
794{
795 p->fail_mode = fail_mode;
796 update_fail_open(p);
797}
798
fa05809b
BP
799/* Drops the connections between 'ofproto' and all of its controllers, forcing
800 * them to reconnect. */
801void
802ofproto_reconnect_controllers(struct ofproto *ofproto)
803{
804 struct ofconn *ofconn;
805
4e8e4213 806 LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
fa05809b
BP
807 rconn_reconnect(ofconn->rconn);
808 }
809}
810
917e50e1
BP
811static bool
812any_extras_changed(const struct ofproto *ofproto,
813 const struct sockaddr_in *extras, size_t n)
814{
815 size_t i;
816
817 if (n != ofproto->n_extra_remotes) {
818 return true;
819 }
820
821 for (i = 0; i < n; i++) {
822 const struct sockaddr_in *old = &ofproto->extra_in_band_remotes[i];
823 const struct sockaddr_in *new = &extras[i];
824
825 if (old->sin_addr.s_addr != new->sin_addr.s_addr ||
826 old->sin_port != new->sin_port) {
827 return true;
828 }
829 }
830
831 return false;
832}
833
834/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
835 * in-band control should guarantee access, in the same way that in-band
836 * control guarantees access to OpenFlow controllers. */
837void
838ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
839 const struct sockaddr_in *extras, size_t n)
840{
841 if (!any_extras_changed(ofproto, extras, n)) {
842 return;
843 }
844
845 free(ofproto->extra_in_band_remotes);
846 ofproto->n_extra_remotes = n;
847 ofproto->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras);
848
849 update_in_band_remotes(ofproto);
850}
851
b1da6250
BP
852/* Sets the OpenFlow queue used by flows set up by in-band control on
853 * 'ofproto' to 'queue_id'. If 'queue_id' is negative, then in-band control
854 * flows will use the default queue. */
855void
856ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id)
857{
858 if (queue_id != ofproto->in_band_queue) {
859 ofproto->in_band_queue = queue_id;
860 update_in_band_remotes(ofproto);
861 }
862}
863
064af421
BP
864void
865ofproto_set_desc(struct ofproto *p,
5a719c38
JP
866 const char *mfr_desc, const char *hw_desc,
867 const char *sw_desc, const char *serial_desc,
8abc4ed7 868 const char *dp_desc)
064af421 869{
5a719c38
JP
870 struct ofp_desc_stats *ods;
871
872 if (mfr_desc) {
873 if (strlen(mfr_desc) >= sizeof ods->mfr_desc) {
874 VLOG_WARN("truncating mfr_desc, must be less than %zu characters",
875 sizeof ods->mfr_desc);
876 }
877 free(p->mfr_desc);
878 p->mfr_desc = xstrdup(mfr_desc);
064af421 879 }
5a719c38
JP
880 if (hw_desc) {
881 if (strlen(hw_desc) >= sizeof ods->hw_desc) {
882 VLOG_WARN("truncating hw_desc, must be less than %zu characters",
883 sizeof ods->hw_desc);
884 }
885 free(p->hw_desc);
886 p->hw_desc = xstrdup(hw_desc);
064af421 887 }
5a719c38
JP
888 if (sw_desc) {
889 if (strlen(sw_desc) >= sizeof ods->sw_desc) {
890 VLOG_WARN("truncating sw_desc, must be less than %zu characters",
891 sizeof ods->sw_desc);
892 }
893 free(p->sw_desc);
894 p->sw_desc = xstrdup(sw_desc);
895 }
896 if (serial_desc) {
897 if (strlen(serial_desc) >= sizeof ods->serial_num) {
898 VLOG_WARN("truncating serial_desc, must be less than %zu "
899 "characters",
900 sizeof ods->serial_num);
901 }
902 free(p->serial_desc);
903 p->serial_desc = xstrdup(serial_desc);
064af421 904 }
8abc4ed7 905 if (dp_desc) {
5a719c38
JP
906 if (strlen(dp_desc) >= sizeof ods->dp_desc) {
907 VLOG_WARN("truncating dp_desc, must be less than %zu characters",
908 sizeof ods->dp_desc);
909 }
8abc4ed7
JP
910 free(p->dp_desc);
911 p->dp_desc = xstrdup(dp_desc);
912 }
064af421
BP
913}
914
064af421
BP
915static int
916set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
917 const struct svec *svec)
918{
919 struct pvconn **pvconns = *pvconnsp;
920 size_t n_pvconns = *n_pvconnsp;
921 int retval = 0;
922 size_t i;
923
924 for (i = 0; i < n_pvconns; i++) {
925 pvconn_close(pvconns[i]);
926 }
927 free(pvconns);
928
929 pvconns = xmalloc(svec->n * sizeof *pvconns);
930 n_pvconns = 0;
931 for (i = 0; i < svec->n; i++) {
932 const char *name = svec->names[i];
933 struct pvconn *pvconn;
934 int error;
935
936 error = pvconn_open(name, &pvconn);
937 if (!error) {
938 pvconns[n_pvconns++] = pvconn;
939 } else {
940 VLOG_ERR("failed to listen on %s: %s", name, strerror(error));
941 if (!retval) {
942 retval = error;
943 }
944 }
945 }
946
947 *pvconnsp = pvconns;
948 *n_pvconnsp = n_pvconns;
949
950 return retval;
951}
952
064af421
BP
953int
954ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops)
955{
956 return set_pvconns(&ofproto->snoops, &ofproto->n_snoops, snoops);
957}
958
959int
0193b2af
JG
960ofproto_set_netflow(struct ofproto *ofproto,
961 const struct netflow_options *nf_options)
064af421 962{
76343538 963 if (nf_options && nf_options->collectors.n) {
064af421
BP
964 if (!ofproto->netflow) {
965 ofproto->netflow = netflow_create();
966 }
0193b2af 967 return netflow_set_options(ofproto->netflow, nf_options);
064af421
BP
968 } else {
969 netflow_destroy(ofproto->netflow);
970 ofproto->netflow = NULL;
971 return 0;
972 }
973}
974
72b06300
BP
975void
976ofproto_set_sflow(struct ofproto *ofproto,
977 const struct ofproto_sflow_options *oso)
978{
979 struct ofproto_sflow *os = ofproto->sflow;
980 if (oso) {
981 if (!os) {
982 struct ofport *ofport;
72b06300
BP
983
984 os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
4e8e4213 985 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
ca0f572c 986 ofproto_sflow_add_port(os, ofport->odp_port,
72b06300
BP
987 netdev_get_name(ofport->netdev));
988 }
989 }
990 ofproto_sflow_set_options(os, oso);
991 } else {
992 ofproto_sflow_destroy(os);
993 ofproto->sflow = NULL;
994 }
995}
996
064af421
BP
997uint64_t
998ofproto_get_datapath_id(const struct ofproto *ofproto)
999{
1000 return ofproto->datapath_id;
1001}
1002
76ce9432 1003bool
7d674866 1004ofproto_has_primary_controller(const struct ofproto *ofproto)
064af421 1005{
76ce9432 1006 return !hmap_is_empty(&ofproto->controllers);
064af421
BP
1007}
1008
abdfe474
JP
1009enum ofproto_fail_mode
1010ofproto_get_fail_mode(const struct ofproto *p)
1011{
1012 return p->fail_mode;
1013}
1014
064af421
BP
1015void
1016ofproto_get_snoops(const struct ofproto *ofproto, struct svec *snoops)
1017{
1018 size_t i;
1019
1020 for (i = 0; i < ofproto->n_snoops; i++) {
1021 svec_add(snoops, pvconn_get_name(ofproto->snoops[i]));
1022 }
1023}
1024
1025void
1026ofproto_destroy(struct ofproto *p)
1027{
7d674866 1028 struct ofservice *ofservice, *next_ofservice;
064af421 1029 struct ofconn *ofconn, *next_ofconn;
ca0f572c 1030 struct ofport *ofport, *next_ofport;
064af421
BP
1031 size_t i;
1032
1033 if (!p) {
1034 return;
1035 }
1036
7aa697dd
BP
1037 shash_find_and_delete(&all_ofprotos, dpif_name(p->dpif));
1038
f7de2cdf 1039 /* Destroy fail-open and in-band early, since they touch the classifier. */
79c9f2ee
BP
1040 fail_open_destroy(p->fail_open);
1041 p->fail_open = NULL;
1042
1043 in_band_destroy(p->in_band);
1044 p->in_band = NULL;
917e50e1 1045 free(p->extra_in_band_remotes);
2f6d3445 1046
064af421
BP
1047 ofproto_flush_flows(p);
1048 classifier_destroy(&p->cls);
bcf84111 1049 hmap_destroy(&p->facets);
064af421 1050
4e8e4213 1051 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
c475ae67 1052 ofconn_destroy(ofconn);
064af421 1053 }
76ce9432 1054 hmap_destroy(&p->controllers);
064af421 1055
c228a364 1056 dpif_close(p->dpif);
e9e28be3 1057 netdev_monitor_destroy(p->netdev_monitor);
4e8e4213 1058 HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) {
ca0f572c 1059 hmap_remove(&p->ports, &ofport->hmap_node);
064af421
BP
1060 ofport_free(ofport);
1061 }
1062 shash_destroy(&p->port_by_name);
1063
1064 switch_status_destroy(p->switch_status);
064af421 1065 netflow_destroy(p->netflow);
72b06300 1066 ofproto_sflow_destroy(p->sflow);
064af421 1067
4e8e4213 1068 HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &p->services) {
7d674866 1069 ofservice_destroy(p, ofservice);
064af421 1070 }
7d674866 1071 hmap_destroy(&p->services);
064af421
BP
1072
1073 for (i = 0; i < p->n_snoops; i++) {
1074 pvconn_close(p->snoops[i]);
1075 }
1076 free(p->snoops);
1077
1078 mac_learning_destroy(p->ml);
1079
5a719c38
JP
1080 free(p->mfr_desc);
1081 free(p->hw_desc);
1082 free(p->sw_desc);
1083 free(p->serial_desc);
cb871ae0
JP
1084 free(p->dp_desc);
1085
ca0f572c 1086 hmap_destroy(&p->ports);
3b917492 1087
064af421
BP
1088 free(p);
1089}
1090
1091int
1092ofproto_run(struct ofproto *p)
1093{
1094 int error = ofproto_run1(p);
1095 if (!error) {
1096 error = ofproto_run2(p, false);
1097 }
1098 return error;
1099}
1100
e9e28be3
BP
1101static void
1102process_port_change(struct ofproto *ofproto, int error, char *devname)
1103{
1104 if (error == ENOBUFS) {
1105 reinit_ports(ofproto);
1106 } else if (!error) {
1107 update_port(ofproto, devname);
1108 free(devname);
1109 }
1110}
1111
e2bfacb6
BP
1112/* Returns a "preference level" for snooping 'ofconn'. A higher return value
1113 * means that 'ofconn' is more interesting for monitoring than a lower return
1114 * value. */
1115static int
1116snoop_preference(const struct ofconn *ofconn)
1117{
1118 switch (ofconn->role) {
1119 case NX_ROLE_MASTER:
1120 return 3;
1121 case NX_ROLE_OTHER:
1122 return 2;
1123 case NX_ROLE_SLAVE:
1124 return 1;
1125 default:
1126 /* Shouldn't happen. */
1127 return 0;
1128 }
1129}
1130
76ce9432
BP
1131/* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'.
1132 * Connects this vconn to a controller. */
1133static void
1134add_snooper(struct ofproto *ofproto, struct vconn *vconn)
1135{
e2bfacb6 1136 struct ofconn *ofconn, *best;
76ce9432 1137
e2bfacb6
BP
1138 /* Pick a controller for monitoring. */
1139 best = NULL;
4e8e4213 1140 LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
5899143f 1141 if (ofconn->type == OFCONN_PRIMARY
e2bfacb6
BP
1142 && (!best || snoop_preference(ofconn) > snoop_preference(best))) {
1143 best = ofconn;
76ce9432 1144 }
e2bfacb6 1145 }
76ce9432 1146
e2bfacb6
BP
1147 if (best) {
1148 rconn_add_monitor(best->rconn, vconn);
1149 } else {
1150 VLOG_INFO_RL(&rl, "no controller connection to snoop");
1151 vconn_close(vconn);
76ce9432 1152 }
76ce9432
BP
1153}
1154
064af421
BP
1155int
1156ofproto_run1(struct ofproto *p)
1157{
1158 struct ofconn *ofconn, *next_ofconn;
7d674866 1159 struct ofservice *ofservice;
064af421
BP
1160 char *devname;
1161 int error;
1162 int i;
1163
149f577a
JG
1164 if (shash_is_empty(&p->port_by_name)) {
1165 init_ports(p);
1166 }
1167
064af421 1168 for (i = 0; i < 50; i++) {
856081f6 1169 struct dpif_upcall packet;
064af421 1170
856081f6 1171 error = dpif_recv(p->dpif, &packet);
064af421
BP
1172 if (error) {
1173 if (error == ENODEV) {
1174 /* Someone destroyed the datapath behind our back. The caller
1175 * better destroy us and give up, because we're just going to
1176 * spin from here on out. */
39a559f2
BP
1177 static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
1178 VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
c228a364 1179 dpif_name(p->dpif));
064af421
BP
1180 return ENODEV;
1181 }
1182 break;
1183 }
1184
856081f6 1185 handle_upcall(p, &packet);
064af421
BP
1186 }
1187
e9e28be3
BP
1188 while ((error = dpif_port_poll(p->dpif, &devname)) != EAGAIN) {
1189 process_port_change(p, error, devname);
1190 }
1191 while ((error = netdev_monitor_poll(p->netdev_monitor,
1192 &devname)) != EAGAIN) {
1193 process_port_change(p, error, devname);
064af421
BP
1194 }
1195
1196 if (p->in_band) {
d2ede7bc
BP
1197 if (time_msec() >= p->next_in_band_update) {
1198 update_in_band_remotes(p);
1199 }
064af421
BP
1200 in_band_run(p->in_band);
1201 }
064af421 1202
4e8e4213 1203 LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &p->all_conns) {
3269c562 1204 ofconn_run(ofconn);
064af421
BP
1205 }
1206
7778bd15
BP
1207 /* Fail-open maintenance. Do this after processing the ofconns since
1208 * fail-open checks the status of the controller rconn. */
1209 if (p->fail_open) {
1210 fail_open_run(p->fail_open);
1211 }
1212
4e8e4213 1213 HMAP_FOR_EACH (ofservice, node, &p->services) {
064af421
BP
1214 struct vconn *vconn;
1215 int retval;
1216
7d674866 1217 retval = pvconn_accept(ofservice->pvconn, OFP_VERSION, &vconn);
064af421 1218 if (!retval) {
9794e806 1219 struct rconn *rconn;
eb15cdbb 1220 char *name;
9794e806 1221
7d674866 1222 rconn = rconn_create(ofservice->probe_interval, 0);
eb15cdbb
BP
1223 name = ofconn_make_name(p, vconn_get_name(vconn));
1224 rconn_connect_unreliably(rconn, vconn, name);
1225 free(name);
1226
7d674866
BP
1227 ofconn = ofconn_create(p, rconn, OFCONN_SERVICE);
1228 ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
1229 ofservice->burst_limit);
064af421
BP
1230 } else if (retval != EAGAIN) {
1231 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
1232 }
1233 }
1234
1235 for (i = 0; i < p->n_snoops; i++) {
1236 struct vconn *vconn;
1237 int retval;
1238
1239 retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn);
1240 if (!retval) {
76ce9432 1241 add_snooper(p, vconn);
064af421
BP
1242 } else if (retval != EAGAIN) {
1243 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
1244 }
1245 }
1246
1247 if (time_msec() >= p->next_expiration) {
0de7a4b4
BP
1248 int delay = ofproto_expire(p);
1249 p->next_expiration = time_msec() + delay;
064af421 1250 COVERAGE_INC(ofproto_expiration);
064af421
BP
1251 }
1252
1253 if (p->netflow) {
1254 netflow_run(p->netflow);
1255 }
72b06300
BP
1256 if (p->sflow) {
1257 ofproto_sflow_run(p->sflow);
1258 }
064af421
BP
1259
1260 return 0;
1261}
1262
064af421
BP
1263int
1264ofproto_run2(struct ofproto *p, bool revalidate_all)
1265{
bcf84111
BP
1266 /* Figure out what we need to revalidate now, if anything. */
1267 struct tag_set revalidate_set = p->revalidate_set;
1268 if (p->need_revalidate) {
1269 revalidate_all = true;
1270 }
1271
1272 /* Clear the revalidation flags. */
1273 tag_set_init(&p->revalidate_set);
1274 p->need_revalidate = false;
1275
1276 /* Now revalidate if there's anything to do. */
1277 if (revalidate_all || !tag_set_is_empty(&revalidate_set)) {
1278 struct facet *facet, *next;
1279
1280 HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &p->facets) {
1281 if (revalidate_all
1282 || tag_set_intersects(&revalidate_set, facet->tags)) {
1283 facet_revalidate(p, facet);
1284 }
1285 }
064af421
BP
1286 }
1287
1288 return 0;
1289}
1290
1291void
1292ofproto_wait(struct ofproto *p)
1293{
7d674866 1294 struct ofservice *ofservice;
064af421
BP
1295 struct ofconn *ofconn;
1296 size_t i;
1297
c228a364 1298 dpif_recv_wait(p->dpif);
e9e28be3
BP
1299 dpif_port_poll_wait(p->dpif);
1300 netdev_monitor_poll_wait(p->netdev_monitor);
4e8e4213 1301 LIST_FOR_EACH (ofconn, node, &p->all_conns) {
064af421
BP
1302 ofconn_wait(ofconn);
1303 }
1304 if (p->in_band) {
7cf8b266 1305 poll_timer_wait_until(p->next_in_band_update);
064af421
BP
1306 in_band_wait(p->in_band);
1307 }
064af421
BP
1308 if (p->fail_open) {
1309 fail_open_wait(p->fail_open);
1310 }
72b06300
BP
1311 if (p->sflow) {
1312 ofproto_sflow_wait(p->sflow);
1313 }
064af421
BP
1314 if (!tag_set_is_empty(&p->revalidate_set)) {
1315 poll_immediate_wake();
1316 }
1317 if (p->need_revalidate) {
1318 /* Shouldn't happen, but if it does just go around again. */
1319 VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
1320 poll_immediate_wake();
1321 } else if (p->next_expiration != LLONG_MAX) {
7cf8b266 1322 poll_timer_wait_until(p->next_expiration);
064af421 1323 }
4e8e4213 1324 HMAP_FOR_EACH (ofservice, node, &p->services) {
7d674866 1325 pvconn_wait(ofservice->pvconn);
064af421
BP
1326 }
1327 for (i = 0; i < p->n_snoops; i++) {
1328 pvconn_wait(p->snoops[i]);
1329 }
1330}
1331
1332void
1333ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
1334{
1335 tag_set_add(&ofproto->revalidate_set, tag);
1336}
1337
1338struct tag_set *
1339ofproto_get_revalidate_set(struct ofproto *ofproto)
1340{
1341 return &ofproto->revalidate_set;
1342}
1343
1344bool
1345ofproto_is_alive(const struct ofproto *p)
1346{
76ce9432 1347 return !hmap_is_empty(&p->controllers);
064af421
BP
1348}
1349
bffc0589
AE
1350void
1351ofproto_get_ofproto_controller_info(const struct ofproto * ofproto,
1352 struct shash *info)
1353{
1354 const struct ofconn *ofconn;
1355
1356 shash_init(info);
1357
1358 HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) {
1359 const struct rconn *rconn = ofconn->rconn;
1360 const int last_error = rconn_get_last_error(rconn);
1361 struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo);
1362
1363 shash_add(info, rconn_get_target(rconn), cinfo);
1364
1365 cinfo->is_connected = rconn_is_connected(rconn);
1366 cinfo->role = ofconn->role;
1367
1368 cinfo->pairs.n = 0;
1369
1370 if (last_error == EOF) {
1371 cinfo->pairs.keys[cinfo->pairs.n] = "last_error";
1372 cinfo->pairs.values[cinfo->pairs.n++] = xstrdup("End of file");
1373 } else if (last_error > 0) {
1374 cinfo->pairs.keys[cinfo->pairs.n] = "last_error";
1375 cinfo->pairs.values[cinfo->pairs.n++] =
1376 xstrdup(strerror(last_error));
1377 }
1378
1379 cinfo->pairs.keys[cinfo->pairs.n] = "state";
1380 cinfo->pairs.values[cinfo->pairs.n++] =
1381 xstrdup(rconn_get_state(rconn));
1382
1383 cinfo->pairs.keys[cinfo->pairs.n] = "time_in_state";
1384 cinfo->pairs.values[cinfo->pairs.n++] =
1385 xasprintf("%u", rconn_get_state_elapsed(rconn));
1386 }
1387}
1388
1389void
1390ofproto_free_ofproto_controller_info(struct shash *info)
1391{
1392 struct shash_node *node;
1393
1394 SHASH_FOR_EACH (node, info) {
1395 struct ofproto_controller_info *cinfo = node->data;
1396 while (cinfo->pairs.n) {
1397 free((char *) cinfo->pairs.values[--cinfo->pairs.n]);
1398 }
1399 free(cinfo);
1400 }
1401 shash_destroy(info);
1402}
1403
3a6ccc8c
BP
1404/* Deletes port number 'odp_port' from the datapath for 'ofproto'.
1405 *
1406 * This is almost the same as calling dpif_port_del() directly on the
1407 * datapath, but it also makes 'ofproto' close its open netdev for the port
1408 * (if any). This makes it possible to create a new netdev of a different
1409 * type under the same name, which otherwise the netdev library would refuse
1410 * to do because of the conflict. (The netdev would eventually get closed on
1411 * the next trip through ofproto_run(), but this interface is more direct.)
1412 *
3a6ccc8c
BP
1413 * Returns 0 if successful, otherwise a positive errno. */
1414int
1415ofproto_port_del(struct ofproto *ofproto, uint16_t odp_port)
1416{
1417 struct ofport *ofport = get_port(ofproto, odp_port);
0b61210e 1418 const char *name = ofport ? ofport->opp.name : "<unknown>";
3a6ccc8c
BP
1419 int error;
1420
1421 error = dpif_port_del(ofproto->dpif, odp_port);
1422 if (error) {
1423 VLOG_ERR("%s: failed to remove port %"PRIu16" (%s) interface (%s)",
1424 dpif_name(ofproto->dpif), odp_port, name, strerror(error));
1425 } else if (ofport) {
1426 /* 'name' is ofport->opp.name and update_port() is going to destroy
1427 * 'ofport'. Just in case update_port() refers to 'name' after it
1428 * destroys 'ofport', make a copy of it around the update_port()
1429 * call. */
1430 char *devname = xstrdup(name);
1431 update_port(ofproto, devname);
1432 free(devname);
1433 }
1434 return error;
1435}
1436
/* Checks if 'ofproto' thinks 'odp_port' should be included in floods.  Returns
 * true if 'odp_port' exists and should be included, false otherwise. */
bool
ofproto_port_is_floodable(struct ofproto *ofproto, uint16_t odp_port)
{
    struct ofport *ofport = get_port(ofproto, odp_port);
    return ofport && !(ofport->opp.config & OFPPC_NO_FLOOD);
}

int
ofproto_send_packet(struct ofproto *p, const struct flow *flow,
                    const union ofp_action *actions, size_t n_actions,
                    const struct ofpbuf *packet)
{
    struct action_xlate_ctx ctx;
    struct ofpbuf *odp_actions;

    action_xlate_ctx_init(&ctx, p, flow, packet);
    odp_actions = xlate_actions(&ctx, actions, n_actions);

    /* XXX Should we translate the dpif_execute() errno value into an OpenFlow
     * error code? */
    dpif_execute(p->dpif, odp_actions->data, odp_actions->size, packet);

    ofpbuf_delete(odp_actions);

    return 0;
}

/* Adds a flow to the OpenFlow flow table in 'p' that matches 'cls_rule' and
 * performs the 'n_actions' actions in 'actions'.  The new flow will not time
 * out.
 *
 * If cls_rule->priority is in the range of priorities supported by OpenFlow
 * (0...65535, inclusive) then the flow will be visible to OpenFlow
 * controllers; otherwise, it will be hidden.
 *
 * The caller retains ownership of 'cls_rule' and 'actions'. */
void
ofproto_add_flow(struct ofproto *p, const struct cls_rule *cls_rule,
                 const union ofp_action *actions, size_t n_actions)
{
    struct rule *rule;
    rule = rule_create(cls_rule, actions, n_actions, 0, 0, 0, false);
    rule_insert(p, rule);
}

void
ofproto_delete_flow(struct ofproto *ofproto, const struct cls_rule *target)
{
    struct rule *rule;

    rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
                                                           target));
    if (rule) {
        rule_remove(ofproto, rule);
    }
}

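/* Illustrative sketch, not part of the original source: how a caller inside
 * Open vSwitch might use ofproto_add_flow() to install a permanent flow that
 * stays hidden from OpenFlow controllers, in the spirit of the in-band
 * control rules.  The priority value and the catchall helper shown here are
 * assumptions about the surrounding library.
 *
 *     struct cls_rule rule;
 *     union ofp_action action;
 *
 *     memset(&action, 0, sizeof action);
 *     action.output.type = htons(OFPAT_OUTPUT);
 *     action.output.len = htons(sizeof action.output);
 *     action.output.port = htons(OFPP_NORMAL);
 *
 *     cls_rule_init_catchall(&rule, 0x10000);    (priority > 65535: hidden)
 *     ofproto_add_flow(ofproto, &rule, &action, 1);
 *     ...
 *     ofproto_delete_flow(ofproto, &rule);
 */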
064af421
BP
1496void
1497ofproto_flush_flows(struct ofproto *ofproto)
1498{
bcf84111 1499 struct facet *facet, *next_facet;
5ecc9d81
BP
1500 struct rule *rule, *next_rule;
1501 struct cls_cursor cursor;
bcf84111 1502
064af421 1503 COVERAGE_INC(ofproto_flush);
bcf84111
BP
1504
1505 HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
1506 /* Mark the facet as not installed so that facet_remove() doesn't
1507 * bother trying to uninstall it. There is no point in uninstalling it
1508 * individually since we are about to blow away all the facets with
1509 * dpif_flow_flush(). */
1510 facet->installed = false;
1511 facet_remove(ofproto, facet);
1512 }
5ecc9d81
BP
1513
1514 cls_cursor_init(&cursor, &ofproto->cls, NULL);
1515 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
1516 rule_remove(ofproto, rule);
1517 }
1518
c228a364 1519 dpif_flow_flush(ofproto->dpif);
064af421
BP
1520 if (ofproto->in_band) {
1521 in_band_flushed(ofproto->in_band);
1522 }
1523 if (ofproto->fail_open) {
1524 fail_open_flushed(ofproto->fail_open);
1525 }
1526}
1527\f
1528static void
1529reinit_ports(struct ofproto *p)
1530{
b0ec0f27 1531 struct dpif_port_dump dump;
c77d9d13
BP
1532 struct shash_node *node;
1533 struct shash devnames;
064af421 1534 struct ofport *ofport;
4c738a8d 1535 struct dpif_port dpif_port;
064af421 1536
898bf89d
JP
1537 COVERAGE_INC(ofproto_reinit_ports);
1538
c77d9d13 1539 shash_init(&devnames);
4e8e4213 1540 HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
c77d9d13 1541 shash_add_once (&devnames, ofport->opp.name, NULL);
064af421 1542 }
4c738a8d
BP
1543 DPIF_PORT_FOR_EACH (&dpif_port, &dump, p->dpif) {
1544 shash_add_once (&devnames, dpif_port.name, NULL);
064af421 1545 }
064af421 1546
c77d9d13
BP
1547 SHASH_FOR_EACH (node, &devnames) {
1548 update_port(p, node->name);
064af421 1549 }
c77d9d13 1550 shash_destroy(&devnames);
064af421
BP
1551}
1552
064af421 1553static struct ofport *
4c738a8d 1554make_ofport(const struct dpif_port *dpif_port)
064af421 1555{
149f577a 1556 struct netdev_options netdev_options;
064af421
BP
1557 enum netdev_flags flags;
1558 struct ofport *ofport;
1559 struct netdev *netdev;
064af421
BP
1560 int error;
1561
149f577a 1562 memset(&netdev_options, 0, sizeof netdev_options);
4c738a8d
BP
1563 netdev_options.name = dpif_port->name;
1564 netdev_options.type = dpif_port->type;
149f577a 1565 netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
149f577a
JG
1566
1567 error = netdev_open(&netdev_options, &netdev);
064af421
BP
1568 if (error) {
1569 VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
1570 "cannot be opened (%s)",
4c738a8d
BP
1571 dpif_port->name, dpif_port->port_no,
1572 dpif_port->name, strerror(error));
064af421
BP
1573 return NULL;
1574 }
1575
1576 ofport = xmalloc(sizeof *ofport);
1577 ofport->netdev = netdev;
4c738a8d
BP
1578 ofport->odp_port = dpif_port->port_no;
1579 ofport->opp.port_no = odp_port_to_ofp_port(dpif_port->port_no);
80992a35 1580 netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
4c738a8d 1581 ovs_strlcpy(ofport->opp.name, dpif_port->name, sizeof ofport->opp.name);
064af421
BP
1582
1583 netdev_get_flags(netdev, &flags);
1584 ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
1585
85da620e 1586 ofport->opp.state = netdev_get_carrier(netdev) ? 0 : OFPPS_LINK_DOWN;
064af421
BP
1587
1588 netdev_get_features(netdev,
1589 &ofport->opp.curr, &ofport->opp.advertised,
1590 &ofport->opp.supported, &ofport->opp.peer);
1591 return ofport;
1592}
1593
1594static bool
4c738a8d 1595ofport_conflicts(const struct ofproto *p, const struct dpif_port *dpif_port)
064af421 1596{
4c738a8d 1597 if (get_port(p, dpif_port->port_no)) {
064af421 1598 VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
4c738a8d 1599 dpif_port->port_no);
064af421 1600 return true;
4c738a8d 1601 } else if (shash_find(&p->port_by_name, dpif_port->name)) {
064af421 1602 VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
4c738a8d 1603 dpif_port->name);
064af421
BP
1604 return true;
1605 } else {
1606 return false;
1607 }
1608}
1609
1610static int
1611ofport_equal(const struct ofport *a_, const struct ofport *b_)
1612{
1613 const struct ofp_phy_port *a = &a_->opp;
1614 const struct ofp_phy_port *b = &b_->opp;
1615
1616 BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
1617 return (a->port_no == b->port_no
1618 && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
0b61210e 1619 && !strcmp(a->name, b->name)
064af421
BP
1620 && a->state == b->state
1621 && a->config == b->config
1622 && a->curr == b->curr
1623 && a->advertised == b->advertised
1624 && a->supported == b->supported
1625 && a->peer == b->peer);
1626}
1627
1628static void
1629send_port_status(struct ofproto *p, const struct ofport *ofport,
1630 uint8_t reason)
1631{
1632 /* XXX Should limit the number of queued port status change messages. */
1633 struct ofconn *ofconn;
4e8e4213 1634 LIST_FOR_EACH (ofconn, node, &p->all_conns) {
064af421
BP
1635 struct ofp_port_status *ops;
1636 struct ofpbuf *b;
1637
197a992f
BP
1638 /* Primary controllers, even slaves, should always get port status
1639 updates. Otherwise obey ofconn_receives_async_msgs(). */
1640 if (ofconn->type != OFCONN_PRIMARY
1641 && !ofconn_receives_async_msgs(ofconn)) {
9deba63b
BP
1642 continue;
1643 }
1644
064af421
BP
1645 ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
1646 ops->reason = reason;
1647 ops->desc = ofport->opp;
1648 hton_ofp_phy_port(&ops->desc);
1649 queue_tx(b, ofconn, NULL);
1650 }
064af421
BP
1651}
1652
1653static void
1654ofport_install(struct ofproto *p, struct ofport *ofport)
1655{
0b61210e 1656 const char *netdev_name = ofport->opp.name;
72b06300 1657
e9e28be3 1658 netdev_monitor_add(p->netdev_monitor, ofport->netdev);
ca0f572c 1659 hmap_insert(&p->ports, &ofport->hmap_node, hash_int(ofport->odp_port, 0));
72b06300
BP
1660 shash_add(&p->port_by_name, netdev_name, ofport);
1661 if (p->sflow) {
ca0f572c 1662 ofproto_sflow_add_port(p->sflow, ofport->odp_port, netdev_name);
72b06300 1663 }
064af421
BP
1664}
1665
1666static void
1667ofport_remove(struct ofproto *p, struct ofport *ofport)
1668{
e9e28be3 1669 netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
ca0f572c 1670 hmap_remove(&p->ports, &ofport->hmap_node);
064af421 1671 shash_delete(&p->port_by_name,
0b61210e 1672 shash_find(&p->port_by_name, ofport->opp.name));
72b06300 1673 if (p->sflow) {
ca0f572c 1674 ofproto_sflow_del_port(p->sflow, ofport->odp_port);
72b06300 1675 }
064af421
BP
1676}
1677
1678static void
1679ofport_free(struct ofport *ofport)
1680{
1681 if (ofport) {
1682 netdev_close(ofport->netdev);
1683 free(ofport);
1684 }
1685}
1686
ca0f572c
BP
1687static struct ofport *
1688get_port(const struct ofproto *ofproto, uint16_t odp_port)
1689{
1690 struct ofport *port;
1691
4e8e4213 1692 HMAP_FOR_EACH_IN_BUCKET (port, hmap_node,
ca0f572c
BP
1693 hash_int(odp_port, 0), &ofproto->ports) {
1694 if (port->odp_port == odp_port) {
1695 return port;
1696 }
1697 }
1698 return NULL;
1699}
1700
064af421
BP
1701static void
1702update_port(struct ofproto *p, const char *devname)
1703{
4c738a8d 1704 struct dpif_port dpif_port;
c874dc6d
BP
1705 struct ofport *old_ofport;
1706 struct ofport *new_ofport;
064af421
BP
1707 int error;
1708
1709 COVERAGE_INC(ofproto_update_port);
c874dc6d
BP
1710
1711 /* Query the datapath for port information. */
4c738a8d 1712 error = dpif_port_query_by_name(p->dpif, devname, &dpif_port);
064af421 1713
c874dc6d
BP
1714 /* Find the old ofport. */
1715 old_ofport = shash_find_data(&p->port_by_name, devname);
1716 if (!error) {
1717 if (!old_ofport) {
1718 /* There's no port named 'devname' but there might be a port with
1719 * the same port number. This could happen if a port is deleted
1720 * and then a new one added in its place very quickly, or if a port
1721 * is renamed. In the former case we want to send an OFPPR_DELETE
1722 * and an OFPPR_ADD, and in the latter case we want to send a
1723 * single OFPPR_MODIFY. We can distinguish the cases by comparing
 1724 * the old port's ifindex against the new port's, or perhaps less
1725 * reliably but more portably by comparing the old port's MAC
1726 * against the new port's MAC. However, this code isn't that smart
1727 * and always sends an OFPPR_MODIFY (XXX). */
4c738a8d 1728 old_ofport = get_port(p, dpif_port.port_no);
064af421 1729 }
c874dc6d 1730 } else if (error != ENOENT && error != ENODEV) {
064af421
BP
1731 VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error "
1732 "%s", strerror(error));
4c738a8d 1733 goto exit;
064af421 1734 }
c874dc6d
BP
1735
1736 /* Create a new ofport. */
4c738a8d 1737 new_ofport = !error ? make_ofport(&dpif_port) : NULL;
c874dc6d
BP
1738
1739 /* Eliminate a few pathological cases. */
1740 if (!old_ofport && !new_ofport) {
4c738a8d 1741 goto exit;
c874dc6d
BP
1742 } else if (old_ofport && new_ofport) {
1743 /* Most of the 'config' bits are OpenFlow soft state, but
bc4a55cd
BP
1744 * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the
1745 * OpenFlow bits from old_ofport. (make_ofport() only sets
1746 * OFPPC_PORT_DOWN and leaves the other bits 0.) */
c874dc6d
BP
1747 new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;
1748
1749 if (ofport_equal(old_ofport, new_ofport)) {
1750 /* False alarm--no change. */
1751 ofport_free(new_ofport);
4c738a8d 1752 goto exit;
c874dc6d
BP
1753 }
1754 }
1755
1756 /* Now deal with the normal cases. */
1757 if (old_ofport) {
1758 ofport_remove(p, old_ofport);
1759 }
1760 if (new_ofport) {
1761 ofport_install(p, new_ofport);
1762 }
1763 send_port_status(p, new_ofport ? new_ofport : old_ofport,
1764 (!old_ofport ? OFPPR_ADD
1765 : !new_ofport ? OFPPR_DELETE
1766 : OFPPR_MODIFY));
1767 ofport_free(old_ofport);
4c738a8d
BP
1768
1769exit:
1770 dpif_port_destroy(&dpif_port);
064af421
BP
1771}
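/* A rough sketch of the MAC-comparison idea mentioned in the XXX comment
 * inside update_port() above (illustrative only; nothing here is wired into
 * the real code path):
 *
 *     bool same_device = old_ofport && new_ofport
 *         && !memcmp(old_ofport->opp.hw_addr, new_ofport->opp.hw_addr,
 *                    OFP_ETH_ALEN);
 *
 * When 'same_device' is true a single OFPPR_MODIFY would be appropriate;
 * otherwise an OFPPR_DELETE followed by an OFPPR_ADD. */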
1772
1773static int
1774init_ports(struct ofproto *p)
1775{
b0ec0f27 1776 struct dpif_port_dump dump;
4c738a8d 1777 struct dpif_port dpif_port;
064af421 1778
4c738a8d
BP
1779 DPIF_PORT_FOR_EACH (&dpif_port, &dump, p->dpif) {
1780 if (!ofport_conflicts(p, &dpif_port)) {
1781 struct ofport *ofport = make_ofport(&dpif_port);
064af421
BP
1782 if (ofport) {
1783 ofport_install(p, ofport);
1784 }
1785 }
1786 }
b0ec0f27 1787
064af421
BP
1788 return 0;
1789}
1790\f
1791static struct ofconn *
76ce9432 1792ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type)
064af421 1793{
76ce9432
BP
1794 struct ofconn *ofconn = xzalloc(sizeof *ofconn);
1795 ofconn->ofproto = p;
064af421
BP
1796 list_push_back(&p->all_conns, &ofconn->node);
1797 ofconn->rconn = rconn;
76ce9432 1798 ofconn->type = type;
b70eac89 1799 ofconn->flow_format = NXFF_OPENFLOW10;
9deba63b 1800 ofconn->role = NX_ROLE_OTHER;
76ce9432 1801 ofconn->packet_in_counter = rconn_packet_counter_create ();
064af421 1802 ofconn->pktbuf = NULL;
064af421 1803 ofconn->miss_send_len = 0;
064af421
BP
1804 ofconn->reply_counter = rconn_packet_counter_create ();
1805 return ofconn;
1806}
1807
1808static void
c475ae67 1809ofconn_destroy(struct ofconn *ofconn)
064af421 1810{
5899143f 1811 if (ofconn->type == OFCONN_PRIMARY) {
76ce9432
BP
1812 hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node);
1813 }
1814 discovery_destroy(ofconn->discovery);
1815
064af421 1816 list_remove(&ofconn->node);
76ce9432 1817 switch_status_unregister(ofconn->ss);
064af421
BP
1818 rconn_destroy(ofconn->rconn);
1819 rconn_packet_counter_destroy(ofconn->packet_in_counter);
1820 rconn_packet_counter_destroy(ofconn->reply_counter);
1821 pktbuf_destroy(ofconn->pktbuf);
1822 free(ofconn);
1823}
1824
1825static void
3269c562 1826ofconn_run(struct ofconn *ofconn)
064af421 1827{
3269c562 1828 struct ofproto *p = ofconn->ofproto;
064af421 1829 int iteration;
76ce9432
BP
1830 size_t i;
1831
1832 if (ofconn->discovery) {
1833 char *controller_name;
1834 if (rconn_is_connectivity_questionable(ofconn->rconn)) {
1835 discovery_question_connectivity(ofconn->discovery);
1836 }
1837 if (discovery_run(ofconn->discovery, &controller_name)) {
1838 if (controller_name) {
eb15cdbb
BP
1839 char *ofconn_name = ofconn_make_name(p, controller_name);
1840 rconn_connect(ofconn->rconn, controller_name, ofconn_name);
1841 free(ofconn_name);
76ce9432
BP
1842 } else {
1843 rconn_disconnect(ofconn->rconn);
1844 }
1845 }
1846 }
1847
1848 for (i = 0; i < N_SCHEDULERS; i++) {
1849 pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
1850 }
064af421
BP
1851
1852 rconn_run(ofconn->rconn);
1853
1854 if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
1855 /* Limit the number of iterations to prevent other tasks from
1856 * starving. */
1857 for (iteration = 0; iteration < 50; iteration++) {
1858 struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
1859 if (!of_msg) {
1860 break;
1861 }
7778bd15
BP
1862 if (p->fail_open) {
1863 fail_open_maybe_recover(p->fail_open);
1864 }
3269c562 1865 handle_openflow(ofconn, of_msg);
064af421
BP
1866 ofpbuf_delete(of_msg);
1867 }
1868 }
1869
76ce9432 1870 if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) {
c475ae67 1871 ofconn_destroy(ofconn);
064af421
BP
1872 }
1873}
1874
1875static void
1876ofconn_wait(struct ofconn *ofconn)
1877{
76ce9432
BP
1878 int i;
1879
1880 if (ofconn->discovery) {
1881 discovery_wait(ofconn->discovery);
1882 }
1883 for (i = 0; i < N_SCHEDULERS; i++) {
1884 pinsched_wait(ofconn->schedulers[i]);
1885 }
064af421
BP
1886 rconn_run_wait(ofconn->rconn);
1887 if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) {
1888 rconn_recv_wait(ofconn->rconn);
1889 } else {
1890 COVERAGE_INC(ofproto_ofconn_stuck);
1891 }
1892}
c91248b3
BP
1893
1894/* Returns true if 'ofconn' should receive asynchronous messages. */
1895static bool
1896ofconn_receives_async_msgs(const struct ofconn *ofconn)
1897{
5899143f
BP
1898 if (ofconn->type == OFCONN_PRIMARY) {
1899 /* Primary controllers always get asynchronous messages unless they
c91248b3
BP
1900 * have configured themselves as "slaves". */
1901 return ofconn->role != NX_ROLE_SLAVE;
1902 } else {
5899143f
BP
1903 /* Service connections don't get asynchronous messages unless they have
1904 * explicitly asked for them by setting a nonzero miss send length. */
c91248b3
BP
1905 return ofconn->miss_send_len > 0;
1906 }
1907}
eb15cdbb
BP
1908
1909/* Returns a human-readable name for an OpenFlow connection between 'ofproto'
1910 * and 'target', suitable for use in log messages for identifying the
1911 * connection.
1912 *
1913 * The name is dynamically allocated. The caller should free it (with free())
1914 * when it is no longer needed. */
1915static char *
1916ofconn_make_name(const struct ofproto *ofproto, const char *target)
1917{
1918 return xasprintf("%s<->%s", dpif_base_name(ofproto->dpif), target);
1919}
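/* Typical use, mirroring the discovery path in ofconn_run() above:
 *
 *     char *name = ofconn_make_name(ofproto, controller_name);
 *     rconn_connect(ofconn->rconn, controller_name, name);
 *     free(name);
 */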
7d674866
BP
1920
1921static void
1922ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst)
1923{
1924 int i;
1925
1926 for (i = 0; i < N_SCHEDULERS; i++) {
1927 struct pinsched **s = &ofconn->schedulers[i];
1928
1929 if (rate > 0) {
1930 if (!*s) {
1931 *s = pinsched_create(rate, burst,
1932 ofconn->ofproto->switch_status);
1933 } else {
1934 pinsched_set_limits(*s, rate, burst);
1935 }
1936 } else {
1937 pinsched_destroy(*s);
1938 *s = NULL;
1939 }
1940 }
1941}
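/* Note that a 'rate' of zero (or less) destroys any existing schedulers, so
 * ofconn_set_rate_limit(ofconn, 0, 0) effectively turns packet-in rate
 * limiting back off for 'ofconn'. */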
1942\f
1943static void
1944ofservice_reconfigure(struct ofservice *ofservice,
1945 const struct ofproto_controller *c)
1946{
1947 ofservice->probe_interval = c->probe_interval;
1948 ofservice->rate_limit = c->rate_limit;
1949 ofservice->burst_limit = c->burst_limit;
1950}
1951
1952/* Creates a new ofservice in 'ofproto'. Returns 0 if successful, otherwise a
1953 * positive errno value. */
1954static int
1955ofservice_create(struct ofproto *ofproto, const struct ofproto_controller *c)
1956{
1957 struct ofservice *ofservice;
1958 struct pvconn *pvconn;
1959 int error;
1960
1961 error = pvconn_open(c->target, &pvconn);
1962 if (error) {
1963 return error;
1964 }
1965
1966 ofservice = xzalloc(sizeof *ofservice);
1967 hmap_insert(&ofproto->services, &ofservice->node,
1968 hash_string(c->target, 0));
1969 ofservice->pvconn = pvconn;
1970
1971 ofservice_reconfigure(ofservice, c);
1972
1973 return 0;
1974}
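/* A minimal calling sketch (the target string and probe interval below are
 * made-up example values, not taken from this file):
 *
 *     struct ofproto_controller c;
 *     memset(&c, 0, sizeof c);
 *     c.target = "pssl:6633";
 *     c.probe_interval = 5;
 *     int retval = ofservice_create(ofproto, &c);
 *     if (retval) {
 *         VLOG_WARN("could not listen on %s: %s", c.target, strerror(retval));
 *     }
 */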
1975
1976static void
1977ofservice_destroy(struct ofproto *ofproto, struct ofservice *ofservice)
1978{
1979 hmap_remove(&ofproto->services, &ofservice->node);
1980 pvconn_close(ofservice->pvconn);
1981 free(ofservice);
1982}
1983
1984/* Finds and returns the ofservice within 'ofproto' that has the given
1985 * 'target', or a null pointer if none exists. */
1986static struct ofservice *
1987ofservice_lookup(struct ofproto *ofproto, const char *target)
1988{
1989 struct ofservice *ofservice;
1990
4e8e4213
BP
1991 HMAP_FOR_EACH_WITH_HASH (ofservice, node, hash_string(target, 0),
1992 &ofproto->services) {
7d674866
BP
1993 if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
1994 return ofservice;
1995 }
1996 }
1997 return NULL;
1998}
064af421 1999\f
bcf84111
BP
2000/* Returns true if 'rule' should be hidden from the controller.
2001 *
2002 * Rules with priority higher than UINT16_MAX are set up by ofproto itself
2003 * (e.g. by in-band control) and are intentionally hidden from the
2004 * controller. */
2005static bool
2006rule_is_hidden(const struct rule *rule)
2007{
2008 return rule->cr.priority > UINT16_MAX;
2009}
2010
2011/* Creates and returns a new rule initialized as specified.
2012 *
2013 * The caller is responsible for inserting the rule into the classifier (with
2014 * rule_insert()). */
064af421 2015static struct rule *
bcf84111 2016rule_create(const struct cls_rule *cls_rule,
064af421 2017 const union ofp_action *actions, size_t n_actions,
ca069229 2018 uint16_t idle_timeout, uint16_t hard_timeout,
8054fc48 2019 ovs_be64 flow_cookie, bool send_flow_removed)
064af421 2020{
ec6fde61 2021 struct rule *rule = xzalloc(sizeof *rule);
bcf84111 2022 rule->cr = *cls_rule;
064af421
BP
2023 rule->idle_timeout = idle_timeout;
2024 rule->hard_timeout = hard_timeout;
39997502 2025 rule->flow_cookie = flow_cookie;
064af421 2026 rule->used = rule->created = time_msec();
ca069229 2027 rule->send_flow_removed = send_flow_removed;
bcf84111 2028 list_init(&rule->facets);
3dffcf07
BP
2029 if (n_actions > 0) {
2030 rule->n_actions = n_actions;
2031 rule->actions = xmemdup(actions, n_actions * sizeof *actions);
2032 }
0193b2af 2033
064af421
BP
2034 return rule;
2035}
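/* A bare-bones calling sequence (sketch only; it assumes the caller has
 * already built a classifier rule 'cr' and an OpenFlow action list
 * 'actions'/'n_actions'):
 *
 *     struct rule *rule = rule_create(&cr, actions, n_actions,
 *                                     0, 0, htonll(0), false);
 *     rule_insert(ofproto, rule);
 *
 * rule_insert(), defined further down, takes ownership of the rule and
 * destroys any identical rule that it displaces. */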
2036
2037static struct rule *
2038rule_from_cls_rule(const struct cls_rule *cls_rule)
2039{
2040 return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
2041}
2042
2043static void
2044rule_free(struct rule *rule)
2045{
2046 free(rule->actions);
064af421
BP
2047 free(rule);
2048}
2049
bcf84111
BP
 2050/* Destroys 'rule', iterating through all of its facets and revalidating them,
 2051 * which destroys any facet that no longer has a rule (probably all of them).
064af421 2052 *
bcf84111 2053 * The caller must have already removed 'rule' from the classifier. */
064af421
BP
2054static void
2055rule_destroy(struct ofproto *ofproto, struct rule *rule)
2056{
bcf84111
BP
2057 struct facet *facet, *next_facet;
2058 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
2059 facet_revalidate(ofproto, facet);
064af421
BP
2060 }
2061 rule_free(rule);
2062}
2063
bcf84111
BP
2064/* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
2065 * that outputs to 'out_port' (output to OFPP_FLOOD and OFPP_ALL doesn't
2066 * count). */
064af421 2067static bool
8054fc48 2068rule_has_out_port(const struct rule *rule, ovs_be16 out_port)
064af421
BP
2069{
2070 const union ofp_action *oa;
2071 struct actions_iterator i;
2072
2073 if (out_port == htons(OFPP_NONE)) {
2074 return true;
2075 }
2076 for (oa = actions_first(&i, rule->actions, rule->n_actions); oa;
2077 oa = actions_next(&i)) {
c1c9c9c4 2078 if (action_outputs_to_port(oa, out_port)) {
064af421
BP
2079 return true;
2080 }
2081 }
2082 return false;
2083}
2084
750638bb
BP
 2085/* Executes, within 'ofproto', the 'actions_len' bytes of ODP actions in
 2086 * 'odp_actions' on 'packet', whose headers are summarized by 'flow'.
2087 *
2088 * Takes ownership of 'packet'. */
9dbb9d5e 2089static bool
856081f6 2090execute_odp_actions(struct ofproto *ofproto, const struct flow *flow,
cf22f8cb 2091 const struct nlattr *odp_actions, size_t actions_len,
750638bb 2092 struct ofpbuf *packet)
9dbb9d5e 2093{
b9298d3f 2094 if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t))
cdee00fd 2095 && odp_actions->nla_type == ODPAT_CONTROLLER) {
9dbb9d5e
BP
2096 /* As an optimization, avoid a round-trip from userspace to kernel to
2097 * userspace. This also avoids possibly filling up kernel packet
2098 * buffers along the way. */
856081f6 2099 struct dpif_upcall upcall;
9dbb9d5e 2100
856081f6
BP
2101 upcall.type = _ODPL_ACTION_NR;
2102 upcall.packet = packet;
2103 upcall.key = NULL;
2104 upcall.key_len = 0;
2105 upcall.userdata = nl_attr_get_u64(odp_actions);
2106 upcall.sample_pool = 0;
2107 upcall.actions = NULL;
2108 upcall.actions_len = 0;
9dbb9d5e 2109
856081f6 2110 send_packet_in(ofproto, &upcall, flow, false);
9dbb9d5e 2111
750638bb
BP
2112 return true;
2113 } else {
2114 int error;
9dbb9d5e 2115
cdee00fd 2116 error = dpif_execute(ofproto->dpif, odp_actions, actions_len, packet);
750638bb
BP
2117 ofpbuf_delete(packet);
2118 return !error;
2119 }
9dbb9d5e
BP
2120}
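/* The fast-path test in execute_odp_actions() above matches only an action
 * list consisting of a single 64-bit ODPAT_CONTROLLER attribute:
 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) is exactly the length that
 * nl_msg_put_u64() produces for one attribute.  Any longer action list falls
 * through to dpif_execute(). */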
2121
bcf84111
BP
2122/* Executes the actions indicated by 'facet' on 'packet' and credits 'facet''s
2123 * statistics appropriately. 'packet' must have at least sizeof(struct
2124 * ofp_packet_in) bytes of headroom.
064af421 2125 *
bcf84111
BP
2126 * For correct results, 'packet' must actually be in 'facet''s flow; that is,
2127 * applying flow_extract() to 'packet' would yield the same flow as
2128 * 'facet->flow'.
064af421 2129 *
bcf84111
BP
2130 * 'facet' must have accurately composed ODP actions; that is, it must not be
2131 * in need of revalidation.
750638bb
BP
2132 *
2133 * Takes ownership of 'packet'. */
064af421 2134static void
bcf84111
BP
2135facet_execute(struct ofproto *ofproto, struct facet *facet,
2136 struct ofpbuf *packet)
064af421 2137{
750638bb 2138 struct odp_flow_stats stats;
bcf84111
BP
2139
2140 assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
2141
2142 flow_extract_stats(&facet->flow, packet, &stats);
856081f6 2143 if (execute_odp_actions(ofproto, &facet->flow,
cdee00fd 2144 facet->actions, facet->actions_len, packet)) {
bcf84111
BP
2145 facet_update_stats(ofproto, facet, &stats);
2146 facet->used = time_msec();
2147 netflow_flow_update_time(ofproto->netflow,
2148 &facet->nf_flow, facet->used);
2149 }
2150}
2151
2152/* Executes the actions indicated by 'rule' on 'packet' and credits 'rule''s
2153 * statistics (or the statistics for one of its facets) appropriately.
2154 * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of headroom.
2155 *
2156 * 'packet' doesn't necessarily have to match 'rule'. 'rule' will be credited
2157 * with statistics for 'packet' either way.
2158 *
2159 * Takes ownership of 'packet'. */
2160static void
2161rule_execute(struct ofproto *ofproto, struct rule *rule, uint16_t in_port,
2162 struct ofpbuf *packet)
2163{
f29152ca 2164 struct action_xlate_ctx ctx;
cdee00fd 2165 struct ofpbuf *odp_actions;
bcf84111 2166 struct facet *facet;
bcf84111
BP
2167 struct flow flow;
2168 size_t size;
064af421 2169
750638bb
BP
2170 assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
2171
bcf84111
BP
2172 flow_extract(packet, 0, in_port, &flow);
2173
2174 /* First look for a related facet. If we find one, account it to that. */
2175 facet = facet_lookup_valid(ofproto, &flow);
2176 if (facet && facet->rule == rule) {
2177 facet_execute(ofproto, facet, packet);
2178 return;
064af421
BP
2179 }
2180
bcf84111
BP
2181 /* Otherwise, if 'rule' is in fact the correct rule for 'packet', then
2182 * create a new facet for it and use that. */
2183 if (rule_lookup(ofproto, &flow) == rule) {
2184 facet = facet_create(ofproto, rule, &flow, packet);
2185 facet_execute(ofproto, facet, packet);
2186 facet_install(ofproto, facet, true);
2187 return;
2188 }
2189
2190 /* We can't account anything to a facet. If we were to try, then that
2191 * facet would have a non-matching rule, busting our invariants. */
f29152ca 2192 action_xlate_ctx_init(&ctx, ofproto, &flow, packet);
cdee00fd 2193 odp_actions = xlate_actions(&ctx, rule->actions, rule->n_actions);
bcf84111 2194 size = packet->size;
856081f6 2195 if (execute_odp_actions(ofproto, &flow, odp_actions->data,
cdee00fd 2196 odp_actions->size, packet)) {
064af421 2197 rule->used = time_msec();
bcf84111
BP
2198 rule->packet_count++;
2199 rule->byte_count += size;
064af421 2200 }
cdee00fd 2201 ofpbuf_delete(odp_actions);
064af421
BP
2202}
2203
afe75089 2204/* Inserts 'rule' into 'p''s flow table. */
064af421 2205static void
afe75089 2206rule_insert(struct ofproto *p, struct rule *rule)
064af421
BP
2207{
2208 struct rule *displaced_rule;
2209
064af421 2210 displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
bcf84111
BP
2211 if (displaced_rule) {
2212 rule_destroy(p, displaced_rule);
064af421 2213 }
bcf84111 2214 p->need_revalidate = true;
064af421
BP
2215}
2216
bcf84111
BP
2217/* Creates and returns a new facet within 'ofproto' owned by 'rule', given a
2218 * 'flow' and an example 'packet' within that flow.
2219 *
2220 * The caller must already have determined that no facet with an identical
2221 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
2222 * 'ofproto''s classifier table. */
2223static struct facet *
2224facet_create(struct ofproto *ofproto, struct rule *rule,
2225 const struct flow *flow, const struct ofpbuf *packet)
064af421 2226{
bcf84111 2227 struct facet *facet;
fbb2ea0b 2228
bcf84111
BP
2229 facet = xzalloc(sizeof *facet);
2230 facet->used = time_msec();
2231 hmap_insert(&ofproto->facets, &facet->hmap_node, flow_hash(flow, 0));
2232 list_push_back(&rule->facets, &facet->list_node);
2233 facet->rule = rule;
2234 facet->flow = *flow;
2235 netflow_flow_init(&facet->nf_flow);
2236 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
2237
2238 facet_make_actions(ofproto, facet, packet);
064af421 2239
bcf84111
BP
2240 return facet;
2241}
2242
2243static void
2244facet_free(struct facet *facet)
2245{
2246 free(facet->actions);
2247 free(facet);
064af421
BP
2248}
2249
431d4707 2250/* Remove 'rule' from 'ofproto' and free up the associated memory:
431d4707
BP
2251 *
2252 * - Removes 'rule' from the classifier.
2253 *
bcf84111
BP
2254 * - If 'rule' has facets, revalidates them (and possibly uninstalls and
2255 * destroys them), via rule_destroy().
431d4707 2256 */
064af421
BP
2257static void
2258rule_remove(struct ofproto *ofproto, struct rule *rule)
2259{
bcf84111
BP
2260 COVERAGE_INC(ofproto_del_rule);
2261 ofproto->need_revalidate = true;
064af421
BP
2262 classifier_remove(&ofproto->cls, &rule->cr);
2263 rule_destroy(ofproto, rule);
2264}
2265
bcf84111
BP
2266/* Remove 'facet' from 'ofproto' and free up the associated memory:
2267 *
2268 * - If 'facet' was installed in the datapath, uninstalls it and updates its
2269 * rule's statistics, via facet_uninstall().
2270 *
2271 * - Removes 'facet' from its rule and from ofproto->facets.
2272 */
2273static void
2274facet_remove(struct ofproto *ofproto, struct facet *facet)
2275{
2276 facet_uninstall(ofproto, facet);
d530fcd2 2277 facet_flush_stats(ofproto, facet);
bcf84111
BP
2278 hmap_remove(&ofproto->facets, &facet->hmap_node);
2279 list_remove(&facet->list_node);
2280 facet_free(facet);
2281}
2282
7f7ae89d
BP
2283/* Composes the ODP actions for 'facet' based on its rule's actions. */
2284static void
bcf84111
BP
2285facet_make_actions(struct ofproto *p, struct facet *facet,
2286 const struct ofpbuf *packet)
064af421 2287{
bcf84111 2288 const struct rule *rule = facet->rule;
cdee00fd 2289 struct ofpbuf *odp_actions;
f29152ca 2290 struct action_xlate_ctx ctx;
064af421 2291
f29152ca 2292 action_xlate_ctx_init(&ctx, p, &facet->flow, packet);
cdee00fd 2293 odp_actions = xlate_actions(&ctx, rule->actions, rule->n_actions);
19739aee
JP
2294 facet->tags = ctx.tags;
2295 facet->may_install = ctx.may_set_up_flow;
2296 facet->nf_flow.output_iface = ctx.nf_output_iface;
064af421 2297
cdee00fd
BP
2298 if (facet->actions_len != odp_actions->size
2299 || memcmp(facet->actions, odp_actions->data, odp_actions->size)) {
7f7ae89d 2300 free(facet->actions);
cdee00fd
BP
2301 facet->actions_len = odp_actions->size;
2302 facet->actions = xmemdup(odp_actions->data, odp_actions->size);
064af421 2303 }
cdee00fd
BP
2304
2305 ofpbuf_delete(odp_actions);
064af421
BP
2306}
2307
2308static int
bcf84111 2309facet_put__(struct ofproto *ofproto, struct facet *facet, int flags,
064af421
BP
2310 struct odp_flow_put *put)
2311{
36956a7d
BP
2312 uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S];
2313 struct ofpbuf key;
2314
2315 ofpbuf_use_stack(&key, keybuf, sizeof keybuf);
2316 odp_flow_key_from_flow(&key, &facet->flow);
2317 assert(key.base == keybuf);
2318
064af421 2319 memset(&put->flow.stats, 0, sizeof put->flow.stats);
36956a7d
BP
2320 put->flow.key = key.data;
2321 put->flow.key_len = key.size;
bcf84111 2322 put->flow.actions = facet->actions;
cdee00fd 2323 put->flow.actions_len = facet->actions_len;
ab48643b 2324 put->flow.flags = 0;
064af421 2325 put->flags = flags;
c228a364 2326 return dpif_flow_put(ofproto->dpif, put);
064af421
BP
2327}
2328
bcf84111
BP
2329/* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. If
2330 * 'zero_stats' is true, clears any existing statistics from the datapath for
2331 * 'facet'. */
064af421 2332static void
bcf84111 2333facet_install(struct ofproto *p, struct facet *facet, bool zero_stats)
064af421 2334{
bcf84111 2335 if (facet->may_install) {
064af421 2336 struct odp_flow_put put;
bcf84111 2337 int flags;
064af421 2338
bcf84111
BP
2339 flags = ODPPF_CREATE | ODPPF_MODIFY;
2340 if (zero_stats) {
2341 flags |= ODPPF_ZERO_STATS;
2342 }
2343 if (!facet_put__(p, facet, flags, &put)) {
2344 facet->installed = true;
2345 }
064af421
BP
2346 }
2347}
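/* The ODPPF_CREATE | ODPPF_MODIFY combination used by facet_put__() asks the
 * datapath to treat the put as an upsert: install the flow if it is missing,
 * overwrite its actions if it already exists.  ODPPF_ZERO_STATS additionally
 * resets the kernel packet and byte counters, which is what rule_execute()
 * wants when it installs a newly created facet with 'zero_stats' true. */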
2348
bcf84111
BP
2349/* Ensures that the bytes in 'facet', plus 'extra_bytes', have been passed up
2350 * to the accounting hook function in the ofhooks structure. */
064af421 2351static void
bcf84111
BP
2352facet_account(struct ofproto *ofproto,
2353 struct facet *facet, uint64_t extra_bytes)
064af421 2354{
bcf84111 2355 uint64_t total_bytes = facet->byte_count + extra_bytes;
064af421
BP
2356
2357 if (ofproto->ofhooks->account_flow_cb
bcf84111 2358 && total_bytes > facet->accounted_bytes)
064af421
BP
2359 {
2360 ofproto->ofhooks->account_flow_cb(
cdee00fd 2361 &facet->flow, facet->tags, facet->actions, facet->actions_len,
bcf84111
BP
2362 total_bytes - facet->accounted_bytes, ofproto->aux);
2363 facet->accounted_bytes = total_bytes;
064af421
BP
2364 }
2365}
2366
 d530fcd2 2367/* If 'facet' is installed in the datapath, uninstalls it. */
064af421 2368static void
bcf84111 2369facet_uninstall(struct ofproto *p, struct facet *facet)
064af421 2370{
bcf84111 2371 if (facet->installed) {
36956a7d 2372 uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S];
064af421 2373 struct odp_flow odp_flow;
36956a7d 2374 struct ofpbuf key;
064af421 2375
36956a7d
BP
2376 ofpbuf_use_stack(&key, keybuf, sizeof keybuf);
2377 odp_flow_key_from_flow(&key, &facet->flow);
2378 assert(key.base == keybuf);
2379
2380 odp_flow.key = key.data;
2381 odp_flow.key_len = key.size;
064af421 2382 odp_flow.actions = NULL;
cdee00fd 2383 odp_flow.actions_len = 0;
ab48643b 2384 odp_flow.flags = 0;
c228a364 2385 if (!dpif_flow_del(p->dpif, &odp_flow)) {
bcf84111 2386 facet_update_stats(p, facet, &odp_flow.stats);
064af421 2387 }
bcf84111 2388 facet->installed = false;
064af421
BP
2389 }
2390}
2391
bcf84111
BP
2392/* Returns true if the only action for 'facet' is to send to the controller.
2393 * (We don't report NetFlow expiration messages for such facets because they
2394 * are just part of the control logic for the network, not real traffic). */
0193b2af 2395static bool
bcf84111 2396facet_is_controller_flow(struct facet *facet)
0193b2af 2397{
bcf84111
BP
2398 return (facet
2399 && facet->rule->n_actions == 1
2400 && action_outputs_to_port(&facet->rule->actions[0],
c1c9c9c4 2401 htons(OFPP_CONTROLLER)));
0193b2af
JG
2402}
2403
bcf84111
BP
2404/* Folds all of 'facet''s statistics into its rule. Also updates the
2405 * accounting ofhook and emits a NetFlow expiration if appropriate. */
064af421 2406static void
d530fcd2 2407facet_flush_stats(struct ofproto *ofproto, struct facet *facet)
064af421 2408{
bcf84111 2409 facet_account(ofproto, facet, 0);
064af421 2410
bcf84111 2411 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
064af421 2412 struct ofexpired expired;
bcf84111
BP
2413 expired.flow = facet->flow;
2414 expired.packet_count = facet->packet_count;
2415 expired.byte_count = facet->byte_count;
2416 expired.used = facet->used;
2417 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
2418 }
2419
2420 facet->rule->packet_count += facet->packet_count;
2421 facet->rule->byte_count += facet->byte_count;
2422
2423 /* Reset counters to prevent double counting if 'facet' ever gets
2424 * reinstalled. */
2425 facet->packet_count = 0;
2426 facet->byte_count = 0;
2427 facet->accounted_bytes = 0;
2428
2429 netflow_flow_clear(&facet->nf_flow);
2430}
2431
2432/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2433 * Returns it if found, otherwise a null pointer.
2434 *
2435 * The returned facet might need revalidation; use facet_lookup_valid()
2436 * instead if that is important. */
2437static struct facet *
2438facet_find(struct ofproto *ofproto, const struct flow *flow)
2439{
2440 struct facet *facet;
2441
2442 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, flow_hash(flow, 0),
2443 &ofproto->facets) {
2444 if (flow_equal(flow, &facet->flow)) {
2445 return facet;
2446 }
2447 }
2448
2449 return NULL;
2450}
2451
2452/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
2453 * Returns it if found, otherwise a null pointer.
2454 *
2455 * The returned facet is guaranteed to be valid. */
2456static struct facet *
2457facet_lookup_valid(struct ofproto *ofproto, const struct flow *flow)
2458{
2459 struct facet *facet = facet_find(ofproto, flow);
2460
2461 /* The facet we found might not be valid, since we could be in need of
2462 * revalidation. If it is not valid, don't return it. */
2463 if (facet
2464 && ofproto->need_revalidate
2465 && !facet_revalidate(ofproto, facet)) {
2466 COVERAGE_INC(ofproto_invalidated);
2467 return NULL;
064af421 2468 }
064af421 2469
bcf84111
BP
2470 return facet;
2471}
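/* The intended calling pattern is the one in rule_execute() above: look the
 * facet up and only trust it if it still belongs to the expected rule, e.g.
 *
 *     struct facet *facet = facet_lookup_valid(ofproto, &flow);
 *     if (facet && facet->rule == rule) {
 *         facet_execute(ofproto, facet, packet);
 *     }
 */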
0193b2af 2472
bcf84111
BP
2473/* Re-searches 'ofproto''s classifier for a rule matching 'facet':
2474 *
2475 * - If the rule found is different from 'facet''s current rule, moves
2476 * 'facet' to the new rule and recompiles its actions.
2477 *
2478 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
2479 * where it is and recompiles its actions anyway.
2480 *
2481 * - If there is none, destroys 'facet'.
2482 *
d530fcd2 2483 * Returns true if 'facet' still exists, false if it has been destroyed. */
bcf84111
BP
2484static bool
2485facet_revalidate(struct ofproto *ofproto, struct facet *facet)
2486{
f29152ca 2487 struct action_xlate_ctx ctx;
cdee00fd 2488 struct ofpbuf *odp_actions;
d530fcd2 2489 struct rule *new_rule;
d530fcd2 2490 bool actions_changed;
bcf84111
BP
2491
2492 COVERAGE_INC(facet_revalidate);
d530fcd2
BP
2493
2494 /* Determine the new rule. */
2495 new_rule = rule_lookup(ofproto, &facet->flow);
2496 if (!new_rule) {
2497 /* No new rule, so delete the facet. */
bcf84111
BP
2498 facet_remove(ofproto, facet);
2499 return false;
2500 }
2501
d530fcd2
BP
2502 /* Calculate new ODP actions.
2503 *
cdee00fd
BP
2504 * We do not modify any 'facet' state yet, because we might need to, e.g.,
2505 * emit a NetFlow expiration and, if so, we need to have the old state
2506 * around to properly compose it. */
f29152ca 2507 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, NULL);
cdee00fd
BP
2508 odp_actions = xlate_actions(&ctx, new_rule->actions, new_rule->n_actions);
2509 actions_changed = (facet->actions_len != odp_actions->size
2510 || memcmp(facet->actions, odp_actions->data,
2511 facet->actions_len));
d530fcd2
BP
2512
2513 /* If the ODP actions changed or the installability changed, then we need
2514 * to talk to the datapath. */
2515 if (actions_changed || facet->may_install != facet->installed) {
2516 if (facet->may_install) {
36956a7d 2517 uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S];
d530fcd2 2518 struct odp_flow_put put;
36956a7d
BP
2519 struct ofpbuf key;
2520
2521 ofpbuf_use_stack(&key, keybuf, sizeof keybuf);
2522 odp_flow_key_from_flow(&key, &facet->flow);
d530fcd2
BP
2523
2524 memset(&put.flow.stats, 0, sizeof put.flow.stats);
36956a7d
BP
2525 put.flow.key = key.data;
2526 put.flow.key_len = key.size;
cdee00fd
BP
2527 put.flow.actions = odp_actions->data;
2528 put.flow.actions_len = odp_actions->size;
d530fcd2
BP
2529 put.flow.flags = 0;
2530 put.flags = ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS;
2531 dpif_flow_put(ofproto->dpif, &put);
2532
2533 facet_update_stats(ofproto, facet, &put.flow.stats);
2534 } else {
2535 facet_uninstall(ofproto, facet);
2536 }
2537
2538 /* The datapath flow is gone or has zeroed stats, so push stats out of
2539 * 'facet' into 'rule'. */
2540 facet_flush_stats(ofproto, facet);
2541 }
2542
2543 /* Update 'facet' now that we've taken care of all the old state. */
f29152ca
BP
2544 facet->tags = ctx.tags;
2545 facet->nf_flow.output_iface = ctx.nf_output_iface;
2546 facet->may_install = ctx.may_set_up_flow;
d530fcd2
BP
2547 if (actions_changed) {
2548 free(facet->actions);
cdee00fd
BP
2549 facet->actions_len = odp_actions->size;
2550 facet->actions = xmemdup(odp_actions->data, odp_actions->size);
d530fcd2
BP
2551 }
2552 if (facet->rule != new_rule) {
bcf84111
BP
2553 COVERAGE_INC(facet_changed_rule);
2554 list_remove(&facet->list_node);
d530fcd2
BP
2555 list_push_back(&new_rule->facets, &facet->list_node);
2556 facet->rule = new_rule;
2557 facet->used = new_rule->created;
0c0afbec 2558 }
bcf84111 2559
cdfcd496
BP
2560 ofpbuf_delete(odp_actions);
2561
bcf84111 2562 return true;
064af421
BP
2563}
2564\f
2565static void
2566queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
2567 struct rconn_packet_counter *counter)
2568{
2569 update_openflow_length(msg);
2570 if (rconn_send(ofconn->rconn, msg, counter)) {
2571 ofpbuf_delete(msg);
2572 }
2573}
2574
064af421
BP
2575static void
2576send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
2577 int error)
2578{
dc4762ed 2579 struct ofpbuf *buf = ofputil_encode_error_msg(error, oh);
26c112c2
BP
2580 if (buf) {
2581 COVERAGE_INC(ofproto_error);
2582 queue_tx(buf, ofconn, ofconn->reply_counter);
2583 }
064af421
BP
2584}
2585
2586static void
2587hton_ofp_phy_port(struct ofp_phy_port *opp)
2588{
2589 opp->port_no = htons(opp->port_no);
2590 opp->config = htonl(opp->config);
2591 opp->state = htonl(opp->state);
2592 opp->curr = htonl(opp->curr);
2593 opp->advertised = htonl(opp->advertised);
2594 opp->supported = htonl(opp->supported);
2595 opp->peer = htonl(opp->peer);
2596}
2597
2598static int
d1e2cf21 2599handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
064af421 2600{
d1e2cf21 2601 queue_tx(make_echo_reply(oh), ofconn, ofconn->reply_counter);
064af421
BP
2602 return 0;
2603}
2604
2605static int
d1e2cf21 2606handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
064af421
BP
2607{
2608 struct ofp_switch_features *osf;
2609 struct ofpbuf *buf;
064af421
BP
2610 struct ofport *port;
2611
2612 osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
3269c562 2613 osf->datapath_id = htonll(ofconn->ofproto->datapath_id);
064af421
BP
2614 osf->n_buffers = htonl(pktbuf_capacity());
2615 osf->n_tables = 2;
2616 osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
0254ae23 2617 OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
064af421
BP
2618 osf->actions = htonl((1u << OFPAT_OUTPUT) |
2619 (1u << OFPAT_SET_VLAN_VID) |
2620 (1u << OFPAT_SET_VLAN_PCP) |
2621 (1u << OFPAT_STRIP_VLAN) |
2622 (1u << OFPAT_SET_DL_SRC) |
2623 (1u << OFPAT_SET_DL_DST) |
2624 (1u << OFPAT_SET_NW_SRC) |
2625 (1u << OFPAT_SET_NW_DST) |
959a2ecd 2626 (1u << OFPAT_SET_NW_TOS) |
064af421 2627 (1u << OFPAT_SET_TP_SRC) |
c1c9c9c4
BP
2628 (1u << OFPAT_SET_TP_DST) |
2629 (1u << OFPAT_ENQUEUE));
064af421 2630
3269c562 2631 HMAP_FOR_EACH (port, hmap_node, &ofconn->ofproto->ports) {
064af421
BP
2632 hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
2633 }
2634
2635 queue_tx(buf, ofconn, ofconn->reply_counter);
2636 return 0;
2637}
2638
2639static int
d1e2cf21 2640handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh)
064af421
BP
2641{
2642 struct ofpbuf *buf;
2643 struct ofp_switch_config *osc;
2644 uint16_t flags;
2645 bool drop_frags;
2646
2647 /* Figure out flags. */
3269c562 2648 dpif_get_drop_frags(ofconn->ofproto->dpif, &drop_frags);
064af421 2649 flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
064af421
BP
2650
2651 /* Send reply. */
2652 osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf);
2653 osc->flags = htons(flags);
2654 osc->miss_send_len = htons(ofconn->miss_send_len);
2655 queue_tx(buf, ofconn, ofconn->reply_counter);
2656
2657 return 0;
2658}
2659
2660static int
d1e2cf21 2661handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc)
064af421 2662{
d1e2cf21 2663 uint16_t flags = ntohs(osc->flags);
064af421 2664
5899143f 2665 if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) {
064af421
BP
2666 switch (flags & OFPC_FRAG_MASK) {
2667 case OFPC_FRAG_NORMAL:
3269c562 2668 dpif_set_drop_frags(ofconn->ofproto->dpif, false);
064af421
BP
2669 break;
2670 case OFPC_FRAG_DROP:
3269c562 2671 dpif_set_drop_frags(ofconn->ofproto->dpif, true);
064af421
BP
2672 break;
2673 default:
2674 VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
2675 osc->flags);
2676 break;
2677 }
2678 }
2679
064af421
BP
2680 ofconn->miss_send_len = ntohs(osc->miss_send_len);
2681
2682 return 0;
2683}
2684
5f8bfd69
BP
2685/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a
2686 * flow translation. */
33b5304b 2687#define MAX_RESUBMIT_RECURSION 16
5f8bfd69 2688
064af421
BP
2689static void do_xlate_actions(const union ofp_action *in, size_t n_in,
2690 struct action_xlate_ctx *ctx);
2691
2692static void
2693add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
2694{
ca0f572c 2695 const struct ofport *ofport = get_port(ctx->ofproto, port);
6cfaf517
BP
2696
2697 if (ofport) {
2698 if (ofport->opp.config & OFPPC_NO_FWD) {
2699 /* Forwarding disabled on port. */
2700 return;
2701 }
2702 } else {
2703 /*
2704 * We don't have an ofport record for this port, but it doesn't hurt to
2705 * allow forwarding to it anyhow. Maybe such a port will appear later
2706 * and we're pre-populating the flow table.
2707 */
064af421 2708 }
6cfaf517 2709
cdee00fd 2710 nl_msg_put_u32(ctx->odp_actions, ODPAT_OUTPUT, port);
6a07af36 2711 ctx->nf_output_iface = port;
064af421
BP
2712}
2713
2714static struct rule *
bcf84111 2715rule_lookup(struct ofproto *ofproto, const struct flow *flow)
064af421 2716{
3c4486a5 2717 return rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
064af421
BP
2718}
2719
2720static void
2721xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
2722{
5f8bfd69 2723 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
2c5d1389 2724 uint16_t old_in_port;
064af421 2725 struct rule *rule;
064af421 2726
2c5d1389
BP
2727 /* Look up a flow with 'in_port' as the input port. Then restore the
2728 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
2729 * have surprising behavior). */
2730 old_in_port = ctx->flow.in_port;
e18fe8a2 2731 ctx->flow.in_port = in_port;
bcf84111 2732 rule = rule_lookup(ctx->ofproto, &ctx->flow);
2c5d1389
BP
2733 ctx->flow.in_port = old_in_port;
2734
7aa697dd
BP
2735 if (ctx->resubmit_hook) {
2736 ctx->resubmit_hook(ctx, rule);
2737 }
2738
064af421 2739 if (rule) {
064af421
BP
2740 ctx->recurse++;
2741 do_xlate_actions(rule->actions, rule->n_actions, ctx);
2742 ctx->recurse--;
2743 }
5f8bfd69 2744 } else {
db5ce514 2745 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
5f8bfd69
BP
2746
2747 VLOG_ERR_RL(&recurse_rl, "NXAST_RESUBMIT recursed over %d times",
2748 MAX_RESUBMIT_RECURSION);
064af421
BP
2749 }
2750}
2751
f1588b1f
BP
2752static void
2753flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask,
cdee00fd 2754 uint16_t *nf_output_iface, struct ofpbuf *odp_actions)
f1588b1f
BP
2755{
2756 struct ofport *ofport;
2757
2758 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
2759 uint16_t odp_port = ofport->odp_port;
2760 if (odp_port != odp_in_port && !(ofport->opp.config & mask)) {
cdee00fd 2761 nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, odp_port);
f1588b1f
BP
2762 }
2763 }
2764 *nf_output_iface = NF_OUT_FLOOD;
2765}
2766
064af421 2767static void
aae51f53
BP
2768xlate_output_action__(struct action_xlate_ctx *ctx,
2769 uint16_t port, uint16_t max_len)
064af421
BP
2770{
2771 uint16_t odp_port;
6a07af36
JG
2772 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
2773
2774 ctx->nf_output_iface = NF_OUT_DROP;
064af421 2775
aae51f53 2776 switch (port) {
064af421 2777 case OFPP_IN_PORT:
e18fe8a2 2778 add_output_action(ctx, ctx->flow.in_port);
064af421
BP
2779 break;
2780 case OFPP_TABLE:
e18fe8a2 2781 xlate_table_action(ctx, ctx->flow.in_port);
064af421
BP
2782 break;
2783 case OFPP_NORMAL:
e18fe8a2 2784 if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet,
cdee00fd 2785 ctx->odp_actions, &ctx->tags,
6a07af36 2786 &ctx->nf_output_iface,
064af421
BP
2787 ctx->ofproto->aux)) {
2788 COVERAGE_INC(ofproto_uninstallable);
d6fbec6d 2789 ctx->may_set_up_flow = false;
064af421
BP
2790 }
2791 break;
2792 case OFPP_FLOOD:
f1588b1f 2793 flood_packets(ctx->ofproto, ctx->flow.in_port, OFPPC_NO_FLOOD,
cdee00fd 2794 &ctx->nf_output_iface, ctx->odp_actions);
9628cd42 2795 break;
064af421 2796 case OFPP_ALL:
f1588b1f 2797 flood_packets(ctx->ofproto, ctx->flow.in_port, 0,
cdee00fd 2798 &ctx->nf_output_iface, ctx->odp_actions);
064af421
BP
2799 break;
2800 case OFPP_CONTROLLER:
b9298d3f 2801 nl_msg_put_u64(ctx->odp_actions, ODPAT_CONTROLLER, max_len);
064af421
BP
2802 break;
2803 case OFPP_LOCAL:
2804 add_output_action(ctx, ODPP_LOCAL);
2805 break;
2806 default:
aae51f53 2807 odp_port = ofp_port_to_odp_port(port);
e18fe8a2 2808 if (odp_port != ctx->flow.in_port) {
064af421
BP
2809 add_output_action(ctx, odp_port);
2810 }
2811 break;
2812 }
6a07af36
JG
2813
2814 if (prev_nf_output_iface == NF_OUT_FLOOD) {
2815 ctx->nf_output_iface = NF_OUT_FLOOD;
2816 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
2817 ctx->nf_output_iface = prev_nf_output_iface;
2818 } else if (prev_nf_output_iface != NF_OUT_DROP &&
2819 ctx->nf_output_iface != NF_OUT_FLOOD) {
2820 ctx->nf_output_iface = NF_OUT_MULTI;
2821 }
064af421
BP
2822}
2823
aae51f53
BP
2824static void
2825xlate_output_action(struct action_xlate_ctx *ctx,
2826 const struct ofp_action_output *oao)
2827{
2828 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
2829}
2830
c1c9c9c4
BP
2831/* If the final ODP action in 'ctx' is "pop priority", drop it, as an
2832 * optimization, because we're going to add another action that sets the
2833 * priority immediately after, or because there are no actions following the
2834 * pop. */
2835static void
2836remove_pop_action(struct action_xlate_ctx *ctx)
2837{
cdee00fd
BP
2838 if (ctx->odp_actions->size == ctx->last_pop_priority) {
2839 ctx->odp_actions->size -= NLA_ALIGN(NLA_HDRLEN);
2840 ctx->last_pop_priority = -1;
2841 }
2842}
2843
2844static void
2845add_pop_action(struct action_xlate_ctx *ctx)
2846{
2847 if (ctx->odp_actions->size != ctx->last_pop_priority) {
2848 nl_msg_put_flag(ctx->odp_actions, ODPAT_POP_PRIORITY);
2849 ctx->last_pop_priority = ctx->odp_actions->size;
c1c9c9c4
BP
2850 }
2851}
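/* Together, remove_pop_action() and add_pop_action() collapse the
 * "pop priority, set priority" churn that would otherwise result from
 * consecutive queueing actions: xlate_enqueue_action(), below, removes a
 * trailing pop before emitting ODPAT_SET_PRIORITY and re-adds a single pop
 * at the end, so only the last pop in a run survives, and xlate_actions()
 * drops a trailing pop entirely since nothing follows it. */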
2852
2853static void
2854xlate_enqueue_action(struct action_xlate_ctx *ctx,
2855 const struct ofp_action_enqueue *oae)
2856{
2857 uint16_t ofp_port, odp_port;
aae51f53
BP
2858 uint32_t priority;
2859 int error;
2860
2861 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
2862 &priority);
2863 if (error) {
2864 /* Fall back to ordinary output action. */
2865 xlate_output_action__(ctx, ntohs(oae->port), 0);
2866 return;
2867 }
c1c9c9c4
BP
2868
2869 /* Figure out ODP output port. */
2870 ofp_port = ntohs(oae->port);
2871 if (ofp_port != OFPP_IN_PORT) {
2872 odp_port = ofp_port_to_odp_port(ofp_port);
2873 } else {
2874 odp_port = ctx->flow.in_port;
2875 }
2876
2877 /* Add ODP actions. */
2878 remove_pop_action(ctx);
cdee00fd 2879 nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority);
c1c9c9c4 2880 add_output_action(ctx, odp_port);
cdee00fd 2881 add_pop_action(ctx);
c1c9c9c4
BP
2882
2883 /* Update NetFlow output port. */
2884 if (ctx->nf_output_iface == NF_OUT_DROP) {
2885 ctx->nf_output_iface = odp_port;
2886 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
2887 ctx->nf_output_iface = NF_OUT_MULTI;
2888 }
2889}
2890
eedc0097
JP
2891static void
2892xlate_set_queue_action(struct action_xlate_ctx *ctx,
2893 const struct nx_action_set_queue *nasq)
2894{
2895 uint32_t priority;
2896 int error;
2897
2898 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
2899 &priority);
2900 if (error) {
2901 /* Couldn't translate queue to a priority, so ignore. A warning
2902 * has already been logged. */
2903 return;
2904 }
2905
2906 remove_pop_action(ctx);
cdee00fd 2907 nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority);
eedc0097
JP
2908}
2909
350a665f
BP
2910static void
2911xlate_set_dl_tci(struct action_xlate_ctx *ctx)
2912{
66642cb4
BP
2913 ovs_be16 tci = ctx->flow.vlan_tci;
2914 if (!(tci & htons(VLAN_CFI))) {
cdee00fd 2915 nl_msg_put_flag(ctx->odp_actions, ODPAT_STRIP_VLAN);
350a665f 2916 } else {
cdee00fd
BP
2917 nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_DL_TCI,
2918 tci & ~htons(VLAN_CFI));
350a665f
BP
2919 }
2920}
2921
7b064a79
BP
2922struct xlate_reg_state {
2923 ovs_be16 vlan_tci;
2924 ovs_be64 tun_id;
2925};
2926
b6c9e612 2927static void
7b064a79
BP
2928save_reg_state(const struct action_xlate_ctx *ctx,
2929 struct xlate_reg_state *state)
b6c9e612 2930{
7b064a79
BP
2931 state->vlan_tci = ctx->flow.vlan_tci;
2932 state->tun_id = ctx->flow.tun_id;
2933}
b6c9e612 2934
7b064a79
BP
2935static void
2936update_reg_state(struct action_xlate_ctx *ctx,
2937 const struct xlate_reg_state *state)
2938{
2939 if (ctx->flow.vlan_tci != state->vlan_tci) {
b6c9e612
BP
2940 xlate_set_dl_tci(ctx);
2941 }
7b064a79 2942 if (ctx->flow.tun_id != state->tun_id) {
926947e6
BP
2943 nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, ctx->flow.tun_id);
2944 }
b6c9e612
BP
2945}
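/* NXAST_REG_MOVE and NXAST_REG_LOAD can target the VLAN TCI or tunnel ID
 * fields of the flow, so xlate_nicira_action() brackets them with
 * save_reg_state()/update_reg_state(): if either field changed, the
 * corresponding ODP action (strip/set DL TCI or ODPAT_SET_TUNNEL) is emitted
 * so that the datapath actions stay in sync with ctx->flow. */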
2946
064af421
BP
2947static void
2948xlate_nicira_action(struct action_xlate_ctx *ctx,
2949 const struct nx_action_header *nah)
2950{
2951 const struct nx_action_resubmit *nar;
659586ef 2952 const struct nx_action_set_tunnel *nast;
eedc0097 2953 const struct nx_action_set_queue *nasq;
53ddd40a 2954 const struct nx_action_multipath *nam;
e41a9130 2955 enum nx_action_subtype subtype = ntohs(nah->subtype);
7b064a79 2956 struct xlate_reg_state state;
b9298d3f 2957 ovs_be64 tun_id;
064af421
BP
2958
2959 assert(nah->vendor == htonl(NX_VENDOR_ID));
2960 switch (subtype) {
2961 case NXAST_RESUBMIT:
2962 nar = (const struct nx_action_resubmit *) nah;
2963 xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port)));
2964 break;
2965
659586ef
JG
2966 case NXAST_SET_TUNNEL:
2967 nast = (const struct nx_action_set_tunnel *) nah;
b9298d3f
BP
2968 tun_id = htonll(ntohl(nast->tun_id));
2969 nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id);
2970 ctx->flow.tun_id = tun_id;
659586ef
JG
2971 break;
2972
401eeb92
BP
2973 case NXAST_DROP_SPOOFED_ARP:
2974 if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
cdee00fd 2975 nl_msg_put_flag(ctx->odp_actions, ODPAT_DROP_SPOOFED_ARP);
401eeb92
BP
2976 }
2977 break;
2978
eedc0097
JP
2979 case NXAST_SET_QUEUE:
2980 nasq = (const struct nx_action_set_queue *) nah;
2981 xlate_set_queue_action(ctx, nasq);
2982 break;
2983
2984 case NXAST_POP_QUEUE:
cdee00fd 2985 add_pop_action(ctx);
eedc0097
JP
2986 break;
2987
b6c9e612 2988 case NXAST_REG_MOVE:
7b064a79
BP
2989 save_reg_state(ctx, &state);
2990 nxm_execute_reg_move((const struct nx_action_reg_move *) nah,
2991 &ctx->flow);
2992 update_reg_state(ctx, &state);
b6c9e612
BP
2993 break;
2994
2995 case NXAST_REG_LOAD:
7b064a79 2996 save_reg_state(ctx, &state);
b6c9e612
BP
2997 nxm_execute_reg_load((const struct nx_action_reg_load *) nah,
2998 &ctx->flow);
7b064a79
BP
2999 update_reg_state(ctx, &state);
3000 break;
96fc46e8
BP
3001
3002 case NXAST_NOTE:
3003 /* Nothing to do. */
b6c9e612
BP
3004 break;
3005
b9298d3f
BP
3006 case NXAST_SET_TUNNEL64:
3007 tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id;
3008 nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id);
3009 ctx->flow.tun_id = tun_id;
3010 break;
3011
53ddd40a
BP
3012 case NXAST_MULTIPATH:
3013 nam = (const struct nx_action_multipath *) nah;
3014 multipath_execute(nam, &ctx->flow);
3015 break;
3016
999f0d45 3017 /* If you add a new action here that modifies flow data, don't forget to
c1c9c9c4 3018 * update the flow key in ctx->flow at the same time. */
999f0d45 3019
e41a9130 3020 case NXAST_SNAT__OBSOLETE:
064af421 3021 default:
e41a9130 3022 VLOG_DBG_RL(&rl, "unknown Nicira action type %d", (int) subtype);
064af421
BP
3023 break;
3024 }
3025}
3026
3027static void
3028do_xlate_actions(const union ofp_action *in, size_t n_in,
3029 struct action_xlate_ctx *ctx)
3030{
3031 struct actions_iterator iter;
3032 const union ofp_action *ia;
3033 const struct ofport *port;
3034
ca0f572c 3035 port = get_port(ctx->ofproto, ctx->flow.in_port);
064af421 3036 if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
ba186119 3037 port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
064af421
BP
3038 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
3039 /* Drop this flow. */
3040 return;
3041 }
3042
3043 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
e41a9130 3044 enum ofp_action_type type = ntohs(ia->type);
cdee00fd 3045 const struct ofp_action_dl_addr *oada;
064af421
BP
3046
3047 switch (type) {
3048 case OFPAT_OUTPUT:
3049 xlate_output_action(ctx, &ia->output);
3050 break;
3051
3052 case OFPAT_SET_VLAN_VID:
66642cb4
BP
3053 ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
3054 ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
350a665f 3055 xlate_set_dl_tci(ctx);
064af421
BP
3056 break;
3057
3058 case OFPAT_SET_VLAN_PCP:
66642cb4
BP
3059 ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
3060 ctx->flow.vlan_tci |= htons(
3061 (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
350a665f 3062 xlate_set_dl_tci(ctx);
064af421
BP
3063 break;
3064
3065 case OFPAT_STRIP_VLAN:
66642cb4 3066 ctx->flow.vlan_tci = htons(0);
350a665f 3067 xlate_set_dl_tci(ctx);
064af421
BP
3068 break;
3069
3070 case OFPAT_SET_DL_SRC:
cdee00fd
BP
3071 oada = ((struct ofp_action_dl_addr *) ia);
3072 nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_SRC,
3073 oada->dl_addr, ETH_ADDR_LEN);
3074 memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
064af421
BP
3075 break;
3076
3077 case OFPAT_SET_DL_DST:
cdee00fd
BP
3078 oada = ((struct ofp_action_dl_addr *) ia);
3079 nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_DST,
3080 oada->dl_addr, ETH_ADDR_LEN);
3081 memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
064af421
BP
3082 break;
3083
3084 case OFPAT_SET_NW_SRC:
cdee00fd
BP
3085 nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_SRC,
3086 ia->nw_addr.nw_addr);
3087 ctx->flow.nw_src = ia->nw_addr.nw_addr;
064af421
BP
3088 break;
3089
2d70a31a 3090 case OFPAT_SET_NW_DST:
cdee00fd
BP
3091 nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_DST,
3092 ia->nw_addr.nw_addr);
3093 ctx->flow.nw_dst = ia->nw_addr.nw_addr;
2d38e234 3094 break;
959a2ecd
JP
3095
3096 case OFPAT_SET_NW_TOS:
cdee00fd
BP
3097 nl_msg_put_u8(ctx->odp_actions, ODPAT_SET_NW_TOS,
3098 ia->nw_tos.nw_tos);
3099 ctx->flow.nw_tos = ia->nw_tos.nw_tos;
2d70a31a
JP
3100 break;
3101
064af421 3102 case OFPAT_SET_TP_SRC:
cdee00fd
BP
3103 nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_SRC,
3104 ia->tp_port.tp_port);
3105 ctx->flow.tp_src = ia->tp_port.tp_port;
064af421
BP
3106 break;
3107
2d70a31a 3108 case OFPAT_SET_TP_DST:
cdee00fd
BP
3109 nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_DST,
3110 ia->tp_port.tp_port);
3111 ctx->flow.tp_dst = ia->tp_port.tp_port;
2d70a31a
JP
3112 break;
3113
064af421
BP
3114 case OFPAT_VENDOR:
3115 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
3116 break;
3117
c1c9c9c4
BP
3118 case OFPAT_ENQUEUE:
3119 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
3120 break;
3121
064af421 3122 default:
e41a9130 3123 VLOG_DBG_RL(&rl, "unknown action type %d", (int) type);
064af421
BP
3124 break;
3125 }
3126 }
3127}
3128
f29152ca
BP
3129static void
3130action_xlate_ctx_init(struct action_xlate_ctx *ctx,
3131 struct ofproto *ofproto, const struct flow *flow,
3132 const struct ofpbuf *packet)
064af421 3133{
f29152ca
BP
3134 ctx->ofproto = ofproto;
3135 ctx->flow = *flow;
3136 ctx->packet = packet;
7aa697dd 3137 ctx->resubmit_hook = NULL;
f29152ca 3138}
1eb0942d 3139
cdee00fd 3140static struct ofpbuf *
f29152ca
BP
3141xlate_actions(struct action_xlate_ctx *ctx,
3142 const union ofp_action *in, size_t n_in)
3143{
064af421 3144 COVERAGE_INC(ofproto_ofp2odp);
cdee00fd
BP
3145
3146 ctx->odp_actions = ofpbuf_new(512);
f29152ca
BP
3147 ctx->tags = 0;
3148 ctx->may_set_up_flow = true;
3149 ctx->nf_output_iface = NF_OUT_DROP;
3150 ctx->recurse = 0;
cdee00fd 3151 ctx->last_pop_priority = -1;
f29152ca
BP
3152 do_xlate_actions(in, n_in, ctx);
3153 remove_pop_action(ctx);
0ad9b732 3154
d6fbec6d 3155 /* Check with in-band control to see if we're allowed to set up this
0ad9b732 3156 * flow. */
cdee00fd
BP
3157 if (!in_band_rule_check(ctx->ofproto->in_band, &ctx->flow,
3158 ctx->odp_actions->data, ctx->odp_actions->size)) {
f29152ca 3159 ctx->may_set_up_flow = false;
0ad9b732
JP
3160 }
3161
cdee00fd 3162 return ctx->odp_actions;
064af421
BP
3163}
3164
9deba63b
BP
3165/* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow
3166 * error message code (composed with ofp_mkerr()) for the caller to propagate
3167 * upward. Otherwise, returns 0.
3168 *
2228b50d 3169 * The log message mentions 'msg_type'. */
9deba63b 3170static int
 2228b50d 3171reject_slave_controller(struct ofconn *ofconn, const char *msg_type)
9deba63b 3172{
5899143f 3173 if (ofconn->type == OFCONN_PRIMARY && ofconn->role == NX_ROLE_SLAVE) {
9deba63b 3174 static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5);
9deba63b 3175 VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller",
2228b50d 3176 msg_type);
9deba63b
BP
3177
3178 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
3179 } else {
3180 return 0;
3181 }
3182}
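/* Handlers for state-changing requests call this first, as in
 * handle_packet_out() and handle_port_mod() below:
 *
 *     error = reject_slave_controller(ofconn, "OFPT_PORT_MOD");
 *     if (error) {
 *         return error;
 *     }
 */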
3183
064af421 3184static int
d1e2cf21 3185handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
064af421 3186{
3269c562 3187 struct ofproto *p = ofconn->ofproto;
064af421
BP
3188 struct ofp_packet_out *opo;
3189 struct ofpbuf payload, *buffer;
ac51afaf 3190 union ofp_action *ofp_actions;
f29152ca 3191 struct action_xlate_ctx ctx;
cdee00fd 3192 struct ofpbuf *odp_actions;
ac51afaf 3193 struct ofpbuf request;
ae412e7d 3194 struct flow flow;
ac51afaf 3195 size_t n_ofp_actions;
064af421 3196 uint16_t in_port;
064af421
BP
3197 int error;
3198
ac51afaf
BP
3199 COVERAGE_INC(ofproto_packet_out);
3200
2228b50d 3201 error = reject_slave_controller(ofconn, "OFPT_PACKET_OUT");
9deba63b
BP
3202 if (error) {
3203 return error;
3204 }
3205
ac51afaf 3206 /* Get ofp_packet_out. */
0bc9407d 3207 ofpbuf_use_const(&request, oh, ntohs(oh->length));
bbc32a88 3208 opo = ofpbuf_pull(&request, offsetof(struct ofp_packet_out, actions));
ac51afaf
BP
3209
3210 /* Get actions. */
3211 error = ofputil_pull_actions(&request, ntohs(opo->actions_len),
3212 &ofp_actions, &n_ofp_actions);
064af421
BP
3213 if (error) {
3214 return error;
3215 }
064af421 3216
ac51afaf 3217 /* Get payload. */
064af421
BP
3218 if (opo->buffer_id != htonl(UINT32_MAX)) {
3219 error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
3220 &buffer, &in_port);
7778bd15 3221 if (error || !buffer) {
064af421
BP
3222 return error;
3223 }
3224 payload = *buffer;
3225 } else {
ac51afaf 3226 payload = request;
064af421
BP
3227 buffer = NULL;
3228 }
3229
ac51afaf
BP
3230 /* Extract flow, check actions. */
3231 flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)),
3232 &flow);
f1defbf9 3233 error = validate_actions(ofp_actions, n_ofp_actions, &flow, p->max_ports);
ac51afaf
BP
3234 if (error) {
3235 goto exit;
3236 }
3237
3238 /* Send. */
f29152ca 3239 action_xlate_ctx_init(&ctx, p, &flow, &payload);
cdee00fd
BP
3240 odp_actions = xlate_actions(&ctx, ofp_actions, n_ofp_actions);
3241 dpif_execute(p->dpif, odp_actions->data, odp_actions->size, &payload);
3242 ofpbuf_delete(odp_actions);
064af421 3243
ac51afaf
BP
3244exit:
3245 ofpbuf_delete(buffer);
3246 return 0;
064af421
BP
3247}
3248
3249static void
3250update_port_config(struct ofproto *p, struct ofport *port,
3251 uint32_t config, uint32_t mask)
3252{
3253 mask &= config ^ port->opp.config;
3254 if (mask & OFPPC_PORT_DOWN) {
3255 if (config & OFPPC_PORT_DOWN) {
3256 netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
3257 } else {
3258 netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
3259 }
3260 }
f1588b1f
BP
3261#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | \
3262 OFPPC_NO_FWD | OFPPC_NO_FLOOD)
064af421
BP
3263 if (mask & REVALIDATE_BITS) {
3264 COVERAGE_INC(ofproto_costly_flags);
3265 port->opp.config ^= mask & REVALIDATE_BITS;
3266 p->need_revalidate = true;
3267 }
3268#undef REVALIDATE_BITS
064af421
BP
3269 if (mask & OFPPC_NO_PACKET_IN) {
3270 port->opp.config ^= OFPPC_NO_PACKET_IN;
3271 }
3272}
3273
3274static int
d1e2cf21 3275handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh)
064af421 3276{
3269c562 3277 struct ofproto *p = ofconn->ofproto;
d1e2cf21 3278 const struct ofp_port_mod *opm = (const struct ofp_port_mod *) oh;
064af421
BP
3279 struct ofport *port;
3280 int error;
3281
2228b50d 3282 error = reject_slave_controller(ofconn, "OFPT_PORT_MOD");
9deba63b
BP
3283 if (error) {
3284 return error;
3285 }
064af421 3286
ca0f572c 3287 port = get_port(p, ofp_port_to_odp_port(ntohs(opm->port_no)));
064af421
BP
3288 if (!port) {
3289 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
3290 } else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
3291 return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
3292 } else {
3293 update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
3294 if (opm->advertise) {
3295 netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
3296 }
3297 }
3298 return 0;
3299}
3300
3301static struct ofpbuf *
06a5e131 3302make_ofp_stats_reply(ovs_be32 xid, ovs_be16 type, size_t body_len)
064af421
BP
3303{
3304 struct ofp_stats_reply *osr;
3305 struct ofpbuf *msg;
3306
3307 msg = ofpbuf_new(MIN(sizeof *osr + body_len, UINT16_MAX));
3308 osr = put_openflow_xid(sizeof *osr, OFPT_STATS_REPLY, xid, msg);
3309 osr->type = type;
3310 osr->flags = htons(0);
3311 return msg;
3312}
3313
3314static struct ofpbuf *
d1e2cf21 3315start_ofp_stats_reply(const struct ofp_header *request, size_t body_len)
064af421 3316{
d1e2cf21
BP
3317 const struct ofp_stats_request *osr
3318 = (const struct ofp_stats_request *) request;
3319 return make_ofp_stats_reply(osr->header.xid, osr->type, body_len);
064af421
BP
3320}
3321
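/* Appends 'nbytes' of uninitialized space to the OFPST_* reply in '*msgp' and
 * returns a pointer to that space.  If adding 'nbytes' would push the current
 * reply past the maximum OpenFlow message size, the current reply is flagged
 * OFPSF_REPLY_MORE and queued to 'ofconn', and a fresh reply with the same
 * xid and type is started in '*msgp' first. */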
3322static void *
06a5e131
BP
3323append_ofp_stats_reply(size_t nbytes, struct ofconn *ofconn,
3324 struct ofpbuf **msgp)
064af421
BP
3325{
3326 struct ofpbuf *msg = *msgp;
3327 assert(nbytes <= UINT16_MAX - sizeof(struct ofp_stats_reply));
3328 if (nbytes + msg->size > UINT16_MAX) {
3329 struct ofp_stats_reply *reply = msg->data;
3330 reply->flags = htons(OFPSF_REPLY_MORE);
06a5e131 3331 *msgp = make_ofp_stats_reply(reply->header.xid, reply->type, nbytes);
064af421
BP
3332 queue_tx(msg, ofconn, ofconn->reply_counter);
3333 }
3334 return ofpbuf_put_uninit(*msgp, nbytes);
3335}
3336
09246b99
BP
3337static struct ofpbuf *
3338make_nxstats_reply(ovs_be32 xid, ovs_be32 subtype, size_t body_len)
3339{
3340 struct nicira_stats_msg *nsm;
3341 struct ofpbuf *msg;
3342
3343 msg = ofpbuf_new(MIN(sizeof *nsm + body_len, UINT16_MAX));
3344 nsm = put_openflow_xid(sizeof *nsm, OFPT_STATS_REPLY, xid, msg);
3345 nsm->type = htons(OFPST_VENDOR);
3346 nsm->flags = htons(0);
3347 nsm->vendor = htonl(NX_VENDOR_ID);
d5f2379b 3348 nsm->subtype = subtype;
09246b99
BP
3349 return msg;
3350}
3351
3352static struct ofpbuf *
3353start_nxstats_reply(const struct nicira_stats_msg *request, size_t body_len)
3354{
3355 return make_nxstats_reply(request->header.xid, request->subtype, body_len);
3356}
3357
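/* NXST analogue of append_ofp_stats_reply().  Ensures that '*msgp' has room
 * for 'nbytes' more bytes, starting a new Nicira stats reply (and queuing the
 * full one with OFPSF_REPLY_MORE set) if necessary.  Unlike the OFPST helper,
 * it does not return a pointer; callers append to '*msgp' themselves. */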
3358static void
3359append_nxstats_reply(size_t nbytes, struct ofconn *ofconn,
3360 struct ofpbuf **msgp)
3361{
3362 struct ofpbuf *msg = *msgp;
3363 assert(nbytes <= UINT16_MAX - sizeof(struct nicira_stats_msg));
3364 if (nbytes + msg->size > UINT16_MAX) {
3365 struct nicira_stats_msg *reply = msg->data;
3366 reply->flags = htons(OFPSF_REPLY_MORE);
3367 *msgp = make_nxstats_reply(reply->header.xid, reply->subtype, nbytes);
3368 queue_tx(msg, ofconn, ofconn->reply_counter);
3369 }
3370 ofpbuf_prealloc_tailroom(*msgp, nbytes);
3371}
3372
064af421 3373static int
3269c562 3374handle_desc_stats_request(struct ofconn *ofconn,
d1e2cf21 3375 const struct ofp_header *request)
064af421 3376{
3269c562 3377 struct ofproto *p = ofconn->ofproto;
064af421
BP
3378 struct ofp_desc_stats *ods;
3379 struct ofpbuf *msg;
3380
06a5e131
BP
3381 msg = start_ofp_stats_reply(request, sizeof *ods);
3382 ods = append_ofp_stats_reply(sizeof *ods, ofconn, &msg);
5a719c38
JP
3383 memset(ods, 0, sizeof *ods);
3384 ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc);
3385 ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc);
3386 ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc);
3387 ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num);
3388 ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc);
064af421
BP
3389 queue_tx(msg, ofconn, ofconn->reply_counter);
3390
3391 return 0;
3392}
3393
064af421 3394static int
3269c562 3395handle_table_stats_request(struct ofconn *ofconn,
d1e2cf21 3396 const struct ofp_header *request)
064af421 3397{
3269c562 3398 struct ofproto *p = ofconn->ofproto;
064af421
BP
3399 struct ofp_table_stats *ots;
3400 struct ofpbuf *msg;
064af421 3401
06a5e131 3402 msg = start_ofp_stats_reply(request, sizeof *ots * 2);
064af421 3403
064af421 3404 /* Classifier table. */
06a5e131 3405 ots = append_ofp_stats_reply(sizeof *ots, ofconn, &msg);
064af421 3406 memset(ots, 0, sizeof *ots);
064af421 3407 strcpy(ots->name, "classifier");
b70eac89 3408 ots->wildcards = (ofconn->flow_format == NXFF_OPENFLOW10
f9bfea14 3409 ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL));
ad828225 3410 ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */
bcf84111 3411 ots->active_count = htonl(classifier_count(&p->cls));
064af421
BP
3412 ots->lookup_count = htonll(0); /* XXX */
3413 ots->matched_count = htonll(0); /* XXX */
3414
3415 queue_tx(msg, ofconn, ofconn->reply_counter);
3416 return 0;
3417}
3418
abaad8cf 3419static void
ca0f572c 3420append_port_stat(struct ofport *port, struct ofconn *ofconn,
a4948b95 3421 struct ofpbuf **msgp)
abaad8cf
JP
3422{
3423 struct netdev_stats stats;
3424 struct ofp_port_stats *ops;
3425
d295e8e9
JP
3426 /* Intentionally ignore return value, since errors will set
3427 * 'stats' to all-1s, which is correct for OpenFlow, and
abaad8cf
JP
3428 * netdev_get_stats() will log errors. */
3429 netdev_get_stats(port->netdev, &stats);
3430
06a5e131 3431 ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp);
ca0f572c 3432 ops->port_no = htons(port->opp.port_no);
abaad8cf
JP
3433 memset(ops->pad, 0, sizeof ops->pad);
3434 ops->rx_packets = htonll(stats.rx_packets);
3435 ops->tx_packets = htonll(stats.tx_packets);
3436 ops->rx_bytes = htonll(stats.rx_bytes);
3437 ops->tx_bytes = htonll(stats.tx_bytes);
3438 ops->rx_dropped = htonll(stats.rx_dropped);
3439 ops->tx_dropped = htonll(stats.tx_dropped);
3440 ops->rx_errors = htonll(stats.rx_errors);
3441 ops->tx_errors = htonll(stats.tx_errors);
3442 ops->rx_frame_err = htonll(stats.rx_frame_errors);
3443 ops->rx_over_err = htonll(stats.rx_over_errors);
3444 ops->rx_crc_err = htonll(stats.rx_crc_errors);
3445 ops->collisions = htonll(stats.collisions);
3446}
3447
064af421 3448static int
d1e2cf21 3449handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
064af421 3450{
3269c562 3451 struct ofproto *p = ofconn->ofproto;
d1e2cf21 3452 const struct ofp_port_stats_request *psr = ofputil_stats_body(oh);
064af421
BP
3453 struct ofp_port_stats *ops;
3454 struct ofpbuf *msg;
3455 struct ofport *port;
064af421 3456
d1e2cf21 3457 msg = start_ofp_stats_reply(oh, sizeof *ops * 16);
abaad8cf 3458 if (psr->port_no != htons(OFPP_NONE)) {
ca0f572c 3459 port = get_port(p, ofp_port_to_odp_port(ntohs(psr->port_no)));
abaad8cf 3460 if (port) {
ca0f572c 3461 append_port_stat(port, ofconn, &msg);
abaad8cf
JP
3462 }
3463 } else {
4e8e4213 3464 HMAP_FOR_EACH (port, hmap_node, &p->ports) {
ca0f572c 3465 append_port_stat(port, ofconn, &msg);
abaad8cf 3466 }
064af421
BP
3467 }
3468
3469 queue_tx(msg, ofconn, ofconn->reply_counter);
3470 return 0;
3471}
3472
01149cfd 3473/* Obtains statistic counters for 'rule' within 'p' and stores them into
bcf84111
BP
3474 * '*packet_countp' and '*byte_countp'. The returned statistics include
3475 * statistics for all of 'rule''s facets. */
064af421
BP
3476static void
3477query_stats(struct ofproto *p, struct rule *rule,
3478 uint64_t *packet_countp, uint64_t *byte_countp)
3479{
36956a7d 3480 uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S];
064af421 3481 uint64_t packet_count, byte_count;
bcf84111 3482 struct facet *facet;
36956a7d 3483 struct ofpbuf key;
064af421 3484
01149cfd 3485 /* Start from historical data for 'rule' itself that are no longer tracked
bcf84111 3486 * by the datapath. This counts, for example, facets that have expired. */
b3137fe8
JG
3487 packet_count = rule->packet_count;
3488 byte_count = rule->byte_count;
3489
36956a7d
BP
3490 /* Ask the datapath for statistics on all of the rule's facets. (We could
3491 * batch up statistics requests using dpif_flow_get_multiple(), but that is
3492 * not yet implemented.)
01149cfd
BP
3493 *
3494 * Also, add any statistics that are not tracked by the datapath for each
bcf84111 3495 * facet. This includes, for example, statistics for packets that were
01149cfd 3496 * executed "by hand" by ofproto via dpif_execute() but must be accounted
bcf84111 3497 * to a rule. */
36956a7d 3498 ofpbuf_use_stack(&key, keybuf, sizeof keybuf);
bcf84111 3499 LIST_FOR_EACH (facet, list_node, &rule->facets) {
36956a7d 3500 struct odp_flow odp_flow;
064af421 3501
36956a7d
BP
3502 ofpbuf_clear(&key);
3503 odp_flow_key_from_flow(&key, &facet->flow);
bcf84111 3504
36956a7d
BP
3505 odp_flow.key = key.data;
3506 odp_flow.key_len = key.size;
3507 odp_flow.actions = NULL;
3508 odp_flow.actions_len = 0;
3509 odp_flow.flags = 0;
3510 if (!dpif_flow_get(p->dpif, &odp_flow)) {
3511 packet_count += odp_flow.stats.n_packets;
3512 byte_count += odp_flow.stats.n_bytes;
064af421 3513 }
36956a7d
BP
3514
3515 packet_count += facet->packet_count;
3516 byte_count += facet->byte_count;
064af421 3517 }
064af421 3518
01149cfd 3519 /* Return the stats to the caller. */
064af421
BP
3520 *packet_countp = packet_count;
3521 *byte_countp = byte_count;
3522}
3523
c6ebb8fb
BP
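/* Stores the time elapsed since 'start' (a time_msec() timestamp) into '*sec'
 * and '*nsec' as the seconds and nanoseconds that OpenFlow duration fields
 * expect, in network byte order. */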
3524static void
3525calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec)
3526{
3527 long long int msecs = time_msec() - start;
3528 *sec = htonl(msecs / 1000);
3529 *nsec = htonl((msecs % 1000) * (1000 * 1000));
3530}
3531
064af421 3532static void
5ecc9d81
BP
3533put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule,
3534 ovs_be16 out_port, struct ofpbuf **replyp)
064af421 3535{
064af421
BP
3536 struct ofp_flow_stats *ofs;
3537 uint64_t packet_count, byte_count;
3538 size_t act_len, len;
3539
5ecc9d81 3540 if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) {
064af421
BP
3541 return;
3542 }
3543
3544 act_len = sizeof *rule->actions * rule->n_actions;
3545 len = offsetof(struct ofp_flow_stats, actions) + act_len;
3546
5ecc9d81 3547 query_stats(ofconn->ofproto, rule, &packet_count, &byte_count);
064af421 3548
5ecc9d81 3549 ofs = append_ofp_stats_reply(len, ofconn, replyp);
064af421 3550 ofs->length = htons(len);
ad828225 3551 ofs->table_id = 0;
064af421 3552 ofs->pad = 0;
ff9d3826
BP
3553 ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match,
3554 rule->flow_cookie, &ofs->cookie);
c6ebb8fb 3555 calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec);
064af421
BP
3556 ofs->priority = htons(rule->cr.priority);
3557 ofs->idle_timeout = htons(rule->idle_timeout);
3558 ofs->hard_timeout = htons(rule->hard_timeout);
39997502 3559 memset(ofs->pad2, 0, sizeof ofs->pad2);
064af421
BP
3560 ofs->packet_count = htonll(packet_count);
3561 ofs->byte_count = htonll(byte_count);
3dffcf07
BP
3562 if (rule->n_actions > 0) {
3563 memcpy(ofs->actions, rule->actions, act_len);
3564 }
064af421
BP
3565}
3566
3c4486a5
BP
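/* Returns true if 'table_id' names a table that this ofproto can report on:
 * either table 0 (the single classifier table) or 0xff, which OpenFlow uses
 * to mean "all tables". */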
3567static bool
3568is_valid_table(uint8_t table_id)
064af421 3569{
3c4486a5 3570 return table_id == 0 || table_id == 0xff;
064af421
BP
3571}
3572
3573static int
d1e2cf21 3574handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
064af421 3575{
d1e2cf21 3576 const struct ofp_flow_stats_request *fsr = ofputil_stats_body(oh);
5ecc9d81 3577 struct ofpbuf *reply;
064af421 3578
064af421 3579 COVERAGE_INC(ofproto_flows_req);
d1e2cf21 3580 reply = start_ofp_stats_reply(oh, 1024);
3c4486a5 3581 if (is_valid_table(fsr->table_id)) {
5ecc9d81 3582 struct cls_cursor cursor;
3c4486a5 3583 struct cls_rule target;
5ecc9d81 3584 struct rule *rule;
3c4486a5 3585
d8ae4d67
BP
3586 ofputil_cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0,
3587 &target);
5ecc9d81
BP
3588 cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
3589 CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
3590 put_ofp_flow_stats(ofconn, rule, fsr->out_port, &reply);
3591 }
3c4486a5 3592 }
5ecc9d81 3593 queue_tx(reply, ofconn, ofconn->reply_counter);
3c4486a5 3594
064af421
BP
3595 return 0;
3596}
3597
09246b99 3598static void
5ecc9d81
BP
3599put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule,
3600 ovs_be16 out_port, struct ofpbuf **replyp)
09246b99 3601{
09246b99
BP
3602 struct nx_flow_stats *nfs;
3603 uint64_t packet_count, byte_count;
3604 size_t act_len, start_len;
5ecc9d81 3605 struct ofpbuf *reply;
09246b99 3606
5ecc9d81 3607 if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) {
09246b99
BP
3608 return;
3609 }
3610
5ecc9d81 3611 query_stats(ofconn->ofproto, rule, &packet_count, &byte_count);
09246b99
BP
3612
3613 act_len = sizeof *rule->actions * rule->n_actions;
3614
5ecc9d81 3615 append_nxstats_reply(sizeof *nfs + NXM_MAX_LEN + act_len, ofconn, replyp);
1dfee98d 3616 start_len = (*replyp)->size;
5ecc9d81
BP
3617 reply = *replyp;
3618
3619 nfs = ofpbuf_put_uninit(reply, sizeof *nfs);
09246b99
BP
3620 nfs->table_id = 0;
3621 nfs->pad = 0;
3622 calc_flow_duration(rule->created, &nfs->duration_sec, &nfs->duration_nsec);
3623 nfs->cookie = rule->flow_cookie;
3624 nfs->priority = htons(rule->cr.priority);
3625 nfs->idle_timeout = htons(rule->idle_timeout);
3626 nfs->hard_timeout = htons(rule->hard_timeout);
5ecc9d81 3627 nfs->match_len = htons(nx_put_match(reply, &rule->cr));
09246b99
BP
3628 memset(nfs->pad2, 0, sizeof nfs->pad2);
3629 nfs->packet_count = htonll(packet_count);
3630 nfs->byte_count = htonll(byte_count);
3631 if (rule->n_actions > 0) {
5ecc9d81 3632 ofpbuf_put(reply, rule->actions, act_len);
09246b99 3633 }
5ecc9d81 3634 nfs->length = htons(reply->size - start_len);
09246b99
BP
3635}
3636
3637static int
d1e2cf21 3638handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh)
09246b99
BP
3639{
3640 struct nx_flow_stats_request *nfsr;
09246b99 3641 struct cls_rule target;
5ecc9d81 3642 struct ofpbuf *reply;
d1e2cf21 3643 struct ofpbuf b;
09246b99
BP
3644 int error;
3645
0bc9407d 3646 ofpbuf_use_const(&b, oh, ntohs(oh->length));
d1e2cf21 3647
09246b99 3648 /* Dissect the message. */
bbc32a88 3649 nfsr = ofpbuf_pull(&b, sizeof *nfsr);
d1e2cf21 3650 error = nx_pull_match(&b, ntohs(nfsr->match_len), 0, &target);
09246b99
BP
3651 if (error) {
3652 return error;
3653 }
d1e2cf21
BP
3654 if (b.size) {
3655 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3656 }
09246b99
BP
3657
3658 COVERAGE_INC(ofproto_flows_req);
5ecc9d81 3659 reply = start_nxstats_reply(&nfsr->nsm, 1024);
3c4486a5 3660 if (is_valid_table(nfsr->table_id)) {
5ecc9d81
BP
3661 struct cls_cursor cursor;
3662 struct rule *rule;
3663
3664 cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
3665 CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
3666 put_nx_flow_stats(ofconn, rule, nfsr->out_port, &reply);
3667 }
3c4486a5 3668 }
5ecc9d81
BP
3669 queue_tx(reply, ofconn, ofconn->reply_counter);
3670
09246b99
BP
3671 return 0;
3672}
3673
4f2cad2c 3674static void
5ecc9d81 3675flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results)
4f2cad2c 3676{
4f2cad2c
JP
3677 uint64_t packet_count, byte_count;
3678 size_t act_len = sizeof *rule->actions * rule->n_actions;
3679
5ecc9d81 3680 query_stats(ofproto, rule, &packet_count, &byte_count);
4f2cad2c
JP
3681
3682 ds_put_format(results, "duration=%llds, ",
3683 (time_msec() - rule->created) / 1000);
52ae00b3 3684 ds_put_format(results, "priority=%u, ", rule->cr.priority);
4f2cad2c
JP
3685 ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
3686 ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
cb833cf6 3687 cls_rule_format(&rule->cr, results);
3dffcf07
BP
3688 if (act_len > 0) {
3689 ofp_print_actions(results, &rule->actions->header, act_len);
3c8552c1
JP
3690 } else {
3691 ds_put_cstr(results, "drop");
3dffcf07 3692 }
4f2cad2c
JP
3693 ds_put_cstr(results, "\n");
3694}
3695
d295e8e9 3696/* Adds a pretty-printed description of all flows to 'results', including
4f2cad2c
JP
3697 * those marked hidden by secchan (e.g., by in-band control). */
3698void
3699ofproto_get_all_flows(struct ofproto *p, struct ds *results)
3700{
5ecc9d81
BP
3701 struct cls_cursor cursor;
3702 struct rule *rule;
064af421 3703
5ecc9d81
BP
3704 cls_cursor_init(&cursor, &p->cls, NULL);
3705 CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
3706 flow_stats_ds(p, rule, results);
064af421 3707 }
064af421
BP
3708}
3709
27d34fce
BP
3710static void
3711query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target,
734bbeb4 3712 ovs_be16 out_port, uint8_t table_id,
27d34fce
BP
3713 struct ofp_aggregate_stats_reply *oasr)
3714{
5ecc9d81
BP
3715 uint64_t total_packets = 0;
3716 uint64_t total_bytes = 0;
3717 int n_flows = 0;
27d34fce
BP
3718
3719 COVERAGE_INC(ofproto_agg_request);
5ecc9d81 3720
3c4486a5 3721 if (is_valid_table(table_id)) {
5ecc9d81
BP
3722 struct cls_cursor cursor;
3723 struct rule *rule;
3c4486a5 3724
5ecc9d81
BP
3725 cls_cursor_init(&cursor, &ofproto->cls, target);
3726 CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
3727 if (!rule_is_hidden(rule) && rule_has_out_port(rule, out_port)) {
3728 uint64_t packet_count;
3729 uint64_t byte_count;
3730
3731 query_stats(ofproto, rule, &packet_count, &byte_count);
3732
3733 total_packets += packet_count;
3734 total_bytes += byte_count;
3735 n_flows++;
3736 }
3737 }
3c4486a5 3738 }
27d34fce 3739
5ecc9d81
BP
3740 oasr->flow_count = htonl(n_flows);
3741 oasr->packet_count = htonll(total_packets);
3742 oasr->byte_count = htonll(total_bytes);
27d34fce
BP
3743 memset(oasr->pad, 0, sizeof oasr->pad);
3744}
3745
064af421 3746static int
3269c562 3747handle_aggregate_stats_request(struct ofconn *ofconn,
d1e2cf21 3748 const struct ofp_header *oh)
064af421 3749{
d1e2cf21 3750 const struct ofp_aggregate_stats_request *request = ofputil_stats_body(oh);
064af421 3751 struct ofp_aggregate_stats_reply *reply;
064af421
BP
3752 struct cls_rule target;
3753 struct ofpbuf *msg;
3754
d8ae4d67
BP
3755 ofputil_cls_rule_from_match(&request->match, 0, NXFF_OPENFLOW10, 0,
3756 &target);
064af421 3757
d1e2cf21 3758 msg = start_ofp_stats_reply(oh, sizeof *reply);
06a5e131 3759 reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
27d34fce
BP
3760 query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
3761 request->table_id, reply);
064af421
BP
3762 queue_tx(msg, ofconn, ofconn->reply_counter);
3763 return 0;
3764}
3765
09246b99 3766static int
d1e2cf21 3767handle_nxst_aggregate(struct ofconn *ofconn, const struct ofp_header *oh)
09246b99
BP
3768{
3769 struct nx_aggregate_stats_request *request;
3770 struct ofp_aggregate_stats_reply *reply;
3771 struct cls_rule target;
d1e2cf21 3772 struct ofpbuf b;
09246b99
BP
3773 struct ofpbuf *buf;
3774 int error;
3775
0bc9407d 3776 ofpbuf_use_const(&b, oh, ntohs(oh->length));
d1e2cf21 3777
09246b99 3778 /* Dissect the message. */
bbc32a88 3779 request = ofpbuf_pull(&b, sizeof *request);
d1e2cf21 3780 error = nx_pull_match(&b, ntohs(request->match_len), 0, &target);
09246b99
BP
3781 if (error) {
3782 return error;
3783 }
d1e2cf21
BP
3784 if (b.size) {
3785 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3786 }
09246b99
BP
3787
3788 /* Reply. */
3789 COVERAGE_INC(ofproto_flows_req);
3790 buf = start_nxstats_reply(&request->nsm, sizeof *reply);
3791 reply = ofpbuf_put_uninit(buf, sizeof *reply);
3792 query_aggregate_stats(ofconn->ofproto, &target, request->out_port,
3793 request->table_id, reply);
3794 queue_tx(buf, ofconn, ofconn->reply_counter);
3795
3796 return 0;
3797}
3798
c1c9c9c4
BP
3799struct queue_stats_cbdata {
3800 struct ofconn *ofconn;
ca0f572c 3801 struct ofport *ofport;
c1c9c9c4 3802 struct ofpbuf *msg;
c1c9c9c4
BP
3803};
3804
3805static void
db9220c3 3806put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id,
c1c9c9c4
BP
3807 const struct netdev_queue_stats *stats)
3808{
3809 struct ofp_queue_stats *reply;
3810
06a5e131 3811 reply = append_ofp_stats_reply(sizeof *reply, cbdata->ofconn, &cbdata->msg);
ca0f572c 3812 reply->port_no = htons(cbdata->ofport->opp.port_no);
c1c9c9c4
BP
3813 memset(reply->pad, 0, sizeof reply->pad);
3814 reply->queue_id = htonl(queue_id);
3815 reply->tx_bytes = htonll(stats->tx_bytes);
3816 reply->tx_packets = htonll(stats->tx_packets);
3817 reply->tx_errors = htonll(stats->tx_errors);
3818}
3819
3820static void
db9220c3 3821handle_queue_stats_dump_cb(uint32_t queue_id,
c1c9c9c4
BP
3822 struct netdev_queue_stats *stats,
3823 void *cbdata_)
3824{
3825 struct queue_stats_cbdata *cbdata = cbdata_;
3826
3827 put_queue_stats(cbdata, queue_id, stats);
3828}
3829
3830static void
ca0f572c 3831handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id,
c1c9c9c4
BP
3832 struct queue_stats_cbdata *cbdata)
3833{
ca0f572c 3834 cbdata->ofport = port;
c1c9c9c4
BP
3835 if (queue_id == OFPQ_ALL) {
3836 netdev_dump_queue_stats(port->netdev,
3837 handle_queue_stats_dump_cb, cbdata);
3838 } else {
3839 struct netdev_queue_stats stats;
3840
1ac788f6
BP
3841 if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) {
3842 put_queue_stats(cbdata, queue_id, &stats);
3843 }
c1c9c9c4
BP
3844 }
3845}
3846
3847static int
d1e2cf21 3848handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh)
c1c9c9c4 3849{
3269c562 3850 struct ofproto *ofproto = ofconn->ofproto;
d1e2cf21 3851 const struct ofp_queue_stats_request *qsr;
c1c9c9c4
BP
3852 struct queue_stats_cbdata cbdata;
3853 struct ofport *port;
3854 unsigned int port_no;
3855 uint32_t queue_id;
3856
d1e2cf21
BP
3857 qsr = ofputil_stats_body(oh);
3858 if (!qsr) {
c1c9c9c4
BP
3859 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
3860 }
c1c9c9c4
BP
3861
3862 COVERAGE_INC(ofproto_queue_req);
3863
3864 cbdata.ofconn = ofconn;
d1e2cf21 3865 cbdata.msg = start_ofp_stats_reply(oh, 128);
c1c9c9c4
BP
3866
3867 port_no = ntohs(qsr->port_no);
3868 queue_id = ntohl(qsr->queue_id);
3869 if (port_no == OFPP_ALL) {
4e8e4213 3870 HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
ca0f572c 3871 handle_queue_stats_for_port(port, queue_id, &cbdata);
c1c9c9c4
BP
3872 }
3873 } else if (port_no < ofproto->max_ports) {
ca0f572c 3874 port = get_port(ofproto, ofp_port_to_odp_port(port_no));
c1c9c9c4 3875 if (port) {
ca0f572c 3876 handle_queue_stats_for_port(port, queue_id, &cbdata);
c1c9c9c4
BP
3877 }
3878 } else {
3879 ofpbuf_delete(cbdata.msg);
3880 return ofp_mkerr(OFPET_QUEUE_OP_FAILED, OFPQOFC_BAD_PORT);
3881 }
3882 queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
3883
3884 return 0;
3885}
3886
064af421
BP
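/* Converts the split seconds/nanoseconds "last used" timestamp reported in
 * struct odp_flow_stats into milliseconds.  A zero 'sec' means the flow has
 * never been used and maps to 0. */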
3887static long long int
3888msec_from_nsec(uint64_t sec, uint32_t nsec)
3889{
3890 return !sec ? 0 : sec * 1000 + nsec / 1000000;
3891}
3892
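/* Updates the 'used' time of 'facet' (and of its owning rule) from the
 * datapath statistics in 'stats', and refreshes the NetFlow record's idea of
 * when the flow was last active, but only if 'stats' shows more recent use
 * than what is already recorded. */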
3893static void
bcf84111
BP
3894facet_update_time(struct ofproto *ofproto, struct facet *facet,
3895 const struct odp_flow_stats *stats)
064af421
BP
3896{
3897 long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
bcf84111
BP
3898 if (used > facet->used) {
3899 facet->used = used;
3900 if (used > facet->rule->used) {
3901 facet->rule->used = used;
4836f9f2 3902 }
bcf84111 3903 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
064af421
BP
3904 }
3905}
3906
bcf84111
BP
3907/* Folds the statistics from 'stats' into the counters in 'facet'.
3908 *
3909 * Because of the meaning of a facet's counters, it only makes sense to do this
3910 * if 'stats' are not tracked in the datapath, that is, if 'stats' represents a
3911 * packet that was sent by hand or if it represents statistics that have been
3912 * cleared out of the datapath. */
064af421 3913static void
bcf84111
BP
3914facet_update_stats(struct ofproto *ofproto, struct facet *facet,
3915 const struct odp_flow_stats *stats)
064af421 3916{
064af421 3917 if (stats->n_packets) {
bcf84111
BP
3918 facet_update_time(ofproto, facet, stats);
3919 facet->packet_count += stats->n_packets;
3920 facet->byte_count += stats->n_bytes;
3921 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
064af421
BP
3922 }
3923}
3924
79eee1eb
BP
3925/* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
3926 * in which no matching flow already exists in the flow table.
3927 *
3928 * Adds the flow specified by 'fm', along with its 'fm->n_actions'
3269c562
BP
3929 * ofp_actions, to ofconn->ofproto's flow table. Returns 0 on success or an
3930 * OpenFlow error code as encoded by ofp_mkerr() on failure.
79eee1eb
BP
3931 *
3932 * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
3933 * if any. */
064af421 3934static int
3052b0c5 3935add_flow(struct ofconn *ofconn, struct flow_mod *fm)
064af421 3936{
3269c562 3937 struct ofproto *p = ofconn->ofproto;
064af421
BP
3938 struct ofpbuf *packet;
3939 struct rule *rule;
3940 uint16_t in_port;
3941 int error;
3942
3052b0c5
BP
3943 if (fm->flags & OFPFF_CHECK_OVERLAP
3944 && classifier_rule_overlaps(&p->cls, &fm->cr)) {
3945 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
49bdc010
JP
3946 }
3947
064af421 3948 error = 0;
3052b0c5
BP
3949 if (fm->buffer_id != UINT32_MAX) {
3950 error = pktbuf_retrieve(ofconn->pktbuf, fm->buffer_id,
064af421 3951 &packet, &in_port);
212fe71c
BP
3952 } else {
3953 packet = NULL;
165cd8a3 3954 in_port = UINT16_MAX;
064af421
BP
3955 }
3956
bcf84111
BP
3957 rule = rule_create(&fm->cr, fm->actions, fm->n_actions,
3958 fm->idle_timeout, fm->hard_timeout, fm->cookie,
3959 fm->flags & OFPFF_SEND_FLOW_REM);
afe75089
BP
3960 rule_insert(p, rule);
3961 if (packet) {
3962 rule_execute(p, rule, in_port, packet);
3963 }
064af421
BP
3964 return error;
3965}
3966
79eee1eb 3967static struct rule *
3052b0c5 3968find_flow_strict(struct ofproto *p, const struct flow_mod *fm)
064af421 3969{
3052b0c5 3970 return rule_from_cls_rule(classifier_find_rule_exactly(&p->cls, &fm->cr));
79eee1eb 3971}
064af421 3972
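/* If 'buffer_id' identifies a packet buffered on 'ofconn' (that is, it is not
 * UINT32_MAX), retrieves that packet and executes 'rule''s actions on it as
 * if it had arrived on the buffered packet's input port.  Returns 0 on
 * success or if there is nothing to do, otherwise an OpenFlow error code from
 * pktbuf_retrieve(). */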
79eee1eb 3973static int
3269c562 3974send_buffered_packet(struct ofconn *ofconn,
3052b0c5 3975 struct rule *rule, uint32_t buffer_id)
79eee1eb
BP
3976{
3977 struct ofpbuf *packet;
3978 uint16_t in_port;
79eee1eb 3979 int error;
064af421 3980
3052b0c5 3981 if (buffer_id == UINT32_MAX) {
79eee1eb 3982 return 0;
064af421 3983 }
79eee1eb 3984
3052b0c5 3985 error = pktbuf_retrieve(ofconn->pktbuf, buffer_id, &packet, &in_port);
79eee1eb
BP
3986 if (error) {
3987 return error;
3988 }
3989
bcf84111 3990 rule_execute(ofconn->ofproto, rule, in_port, packet);
79eee1eb 3991
064af421
BP
3992 return 0;
3993}
79eee1eb
BP
3994\f
3995/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
064af421
BP
3996
3997struct modify_flows_cbdata {
3998 struct ofproto *ofproto;
3052b0c5 3999 const struct flow_mod *fm;
79eee1eb 4000 struct rule *match;
064af421
BP
4001};
4002
3052b0c5
BP
4003static int modify_flow(struct ofproto *, const struct flow_mod *,
4004 struct rule *);
79eee1eb
BP
4005
4006/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
4007 * encoded by ofp_mkerr() on failure.
4008 *
4009 * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
4010 * if any. */
4011static int
3052b0c5 4012modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
79eee1eb 4013{
5ecc9d81
BP
4014 struct ofproto *p = ofconn->ofproto;
4015 struct rule *match = NULL;
4016 struct cls_cursor cursor;
4017 struct rule *rule;
79eee1eb 4018
5ecc9d81
BP
4019 cls_cursor_init(&cursor, &p->cls, &fm->cr);
4020 CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
4021 if (!rule_is_hidden(rule)) {
4022 match = rule;
4023 modify_flow(p, fm, rule);
4024 }
4025 }
79eee1eb 4026
5ecc9d81 4027 if (match) {
d6302b0f
BP
4028 /* This credits the packet to whichever flow happened to match last.
4029 * That's weird. Maybe we should do a lookup for the flow that
4030 * actually matches the packet? Who knows. */
5ecc9d81 4031 send_buffered_packet(ofconn, match, fm->buffer_id);
79eee1eb
BP
4032 return 0;
4033 } else {
3052b0c5 4034 return add_flow(ofconn, fm);
79eee1eb
BP
4035 }
4036}
4037
4038/* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
4039 * code as encoded by ofp_mkerr() on failure.
4040 *
4041 * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
4042 * if any. */
4043static int
3052b0c5 4044modify_flow_strict(struct ofconn *ofconn, struct flow_mod *fm)
79eee1eb 4045{
3052b0c5
BP
4046 struct ofproto *p = ofconn->ofproto;
4047 struct rule *rule = find_flow_strict(p, fm);
79eee1eb 4048 if (rule && !rule_is_hidden(rule)) {
3052b0c5
BP
4049 modify_flow(p, fm, rule);
4050 return send_buffered_packet(ofconn, rule, fm->buffer_id);
79eee1eb 4051 } else {
3052b0c5 4052 return add_flow(ofconn, fm);
79eee1eb
BP
4053 }
4054}
4055
79eee1eb
BP
4056/* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
4057 * been identified as a flow in 'p''s flow table to be modified, by changing
4058 * the rule's actions to match the 'fm->n_actions' ofp_action[] structures
4059 * in 'fm->actions'. */
064af421 4060static int
3052b0c5 4061modify_flow(struct ofproto *p, const struct flow_mod *fm, struct rule *rule)
064af421 4062{
3052b0c5 4063 size_t actions_len = fm->n_actions * sizeof *rule->actions;
79eee1eb 4064
3052b0c5 4065 rule->flow_cookie = fm->cookie;
79eee1eb
BP
4066
4067 /* If the actions are the same, do nothing. */
3052b0c5
BP
4068 if (fm->n_actions == rule->n_actions
4069 && (!fm->n_actions
4070 || !memcmp(fm->actions, rule->actions, actions_len))) {
79eee1eb
BP
4071 return 0;
4072 }
4073
4074 /* Replace actions. */
4075 free(rule->actions);
3052b0c5
BP
4076 rule->actions = fm->n_actions ? xmemdup(fm->actions, actions_len) : NULL;
4077 rule->n_actions = fm->n_actions;
79eee1eb 4078
bcf84111 4079 p->need_revalidate = true;
79eee1eb
BP
4080
4081 return 0;
4082}
4083\f
4084/* OFPFC_DELETE implementation. */
4085
8054fc48 4086static void delete_flow(struct ofproto *, struct rule *, ovs_be16 out_port);
79eee1eb
BP
4087
4088/* Implements OFPFC_DELETE. */
4089static void
3052b0c5 4090delete_flows_loose(struct ofproto *p, const struct flow_mod *fm)
79eee1eb 4091{
5ecc9d81
BP
4092 struct rule *rule, *next_rule;
4093 struct cls_cursor cursor;
064af421 4094
5ecc9d81
BP
4095 cls_cursor_init(&cursor, &p->cls, &fm->cr);
4096 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
4097 delete_flow(p, rule, htons(fm->out_port));
4098 }
064af421
BP
4099}
4100
79eee1eb
BP
4101/* Implements OFPFC_DELETE_STRICT. */
4102static void
3052b0c5 4103delete_flow_strict(struct ofproto *p, struct flow_mod *fm)
79eee1eb 4104{
3052b0c5 4105 struct rule *rule = find_flow_strict(p, fm);
79eee1eb 4106 if (rule) {
3052b0c5 4107 delete_flow(p, rule, htons(fm->out_port));
79eee1eb
BP
4108 }
4109}
4110
79eee1eb
BP
4111/* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
4112 * been identified as a flow to delete from 'p''s flow table, by deleting the
4113 * flow and sending out an OFPT_FLOW_REMOVED message to any interested
4114 * controller.
4115 *
4116 * Will not delete 'rule' if it is hidden. Will delete 'rule' only if
4117 * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the
4118 * specified 'out_port'. */
4119static void
8054fc48 4120delete_flow(struct ofproto *p, struct rule *rule, ovs_be16 out_port)
79eee1eb
BP
4121{
4122 if (rule_is_hidden(rule)) {
4123 return;
4124 }
4125
4126 if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) {
4127 return;
4128 }
4129
bcf84111 4130 rule_send_removed(p, rule, OFPRR_DELETE);
79eee1eb
BP
4131 rule_remove(p, rule);
4132}
4133\f
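/* Handles an OFPT_FLOW_MOD or NXT_FLOW_MOD message from 'ofconn': decodes it
 * according to the connection's flow format, refuses requests from slave
 * controllers and emergency-flow-cache requests, validates the actions
 * against the datapath's port range, and then dispatches to the handler for
 * the specific OFPFC_* command. */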
064af421 4134static int
2e4f5fcf 4135handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh)
064af421 4136{
3052b0c5 4137 struct ofproto *p = ofconn->ofproto;
2e4f5fcf 4138 struct flow_mod fm;
064af421
BP
4139 int error;
4140
3052b0c5 4141 error = reject_slave_controller(ofconn, "flow_mod");
9deba63b
BP
4142 if (error) {
4143 return error;
4144 }
3052b0c5 4145
2e4f5fcf 4146 error = ofputil_decode_flow_mod(&fm, oh, ofconn->flow_format);
064af421
BP
4147 if (error) {
4148 return error;
4149 }
4150
2e4f5fcf
BP
4151 /* We do not support the emergency flow cache. It will hopefully get
4152 * dropped from OpenFlow in the near future. */
4153 if (fm.flags & OFPFF_EMERG) {
49bdc010
JP
4154 /* There isn't a good fit for an error code, so just state that the
4155 * flow table is full. */
4156 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
4157 }
4158
2e4f5fcf
BP
4159 error = validate_actions(fm.actions, fm.n_actions,
4160 &fm.cr.flow, p->max_ports);
4161 if (error) {
4162 return error;
4163 }
4164
4165 switch (fm.command) {
3052b0c5 4166 case OFPFC_ADD:
2e4f5fcf 4167 return add_flow(ofconn, &fm);
3052b0c5
BP
4168
4169 case OFPFC_MODIFY:
2e4f5fcf 4170 return modify_flows_loose(ofconn, &fm);
3052b0c5
BP
4171
4172 case OFPFC_MODIFY_STRICT:
2e4f5fcf 4173 return modify_flow_strict(ofconn, &fm);
3052b0c5
BP
4174
4175 case OFPFC_DELETE:
2e4f5fcf 4176 delete_flows_loose(p, &fm);
3052b0c5
BP
4177 return 0;
4178
4179 case OFPFC_DELETE_STRICT:
2e4f5fcf 4180 delete_flow_strict(p, &fm);
3052b0c5
BP
4181 return 0;
4182
4183 default:
4184 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND);
4185 }
4186}
4187
659586ef 4188static int
d1e2cf21 4189handle_tun_id_from_cookie(struct ofconn *ofconn, const struct ofp_header *oh)
659586ef 4190{
d1e2cf21
BP
4191 const struct nxt_tun_id_cookie *msg
4192 = (const struct nxt_tun_id_cookie *) oh;
659586ef 4193
b70eac89 4194 ofconn->flow_format = msg->set ? NXFF_TUN_ID_FROM_COOKIE : NXFF_OPENFLOW10;
659586ef
JG
4195 return 0;
4196}
4197
9deba63b 4198static int
d1e2cf21 4199handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh)
9deba63b 4200{
d1e2cf21 4201 struct nx_role_request *nrr = (struct nx_role_request *) oh;
9deba63b
BP
4202 struct nx_role_request *reply;
4203 struct ofpbuf *buf;
4204 uint32_t role;
4205
5899143f 4206 if (ofconn->type != OFCONN_PRIMARY) {
9deba63b
BP
4207 VLOG_WARN_RL(&rl, "ignoring role request on non-controller "
4208 "connection");
4209 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
4210 }
4211
4212 role = ntohl(nrr->role);
4213 if (role != NX_ROLE_OTHER && role != NX_ROLE_MASTER
4214 && role != NX_ROLE_SLAVE) {
4215 VLOG_WARN_RL(&rl, "received request for unknown role %"PRIu32, role);
4216
4217 /* There's no good error code for this. */
4218 return ofp_mkerr(OFPET_BAD_REQUEST, -1);
4219 }
4220
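    /* NX roles allow at most one master at a time: when this connection takes
     * the master role, demote any other controller connection that currently
     * holds it to slave. */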
4221 if (role == NX_ROLE_MASTER) {
4222 struct ofconn *other;
4223
3269c562 4224 HMAP_FOR_EACH (other, hmap_node, &ofconn->ofproto->controllers) {
9deba63b
BP
4225 if (other->role == NX_ROLE_MASTER) {
4226 other->role = NX_ROLE_SLAVE;
4227 }
4228 }
4229 }
4230 ofconn->role = role;
4231
d1e2cf21 4232 reply = make_nxmsg_xid(sizeof *reply, NXT_ROLE_REPLY, oh->xid, &buf);
9deba63b
BP
4233 reply->role = htonl(role);
4234 queue_tx(buf, ofconn, ofconn->reply_counter);
4235
4236 return 0;
4237}
4238
09246b99 4239static int
d1e2cf21 4240handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh)
09246b99 4241{
d1e2cf21
BP
4242 const struct nxt_set_flow_format *msg
4243 = (const struct nxt_set_flow_format *) oh;
09246b99 4244 uint32_t format;
09246b99
BP
4245
4246 format = ntohl(msg->format);
4247 if (format == NXFF_OPENFLOW10
4248 || format == NXFF_TUN_ID_FROM_COOKIE
4249 || format == NXFF_NXM) {
4250 ofconn->flow_format = format;
4251 return 0;
4252 } else {
4253 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
4254 }
4255}
4256
064af421 4257static int
d1e2cf21 4258handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh)
246e61ea
JP
4259{
4260 struct ofp_header *ob;
4261 struct ofpbuf *buf;
4262
4263 /* Currently, everything executes synchronously, so we can just
4264 * immediately send the barrier reply. */
4265 ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf);
4266 queue_tx(buf, ofconn, ofconn->reply_counter);
4267 return 0;
4268}
4269
d1e2cf21
BP
4270static int
4271handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
064af421 4272{
d1e2cf21
BP
4273 const struct ofp_header *oh = msg->data;
4274 const struct ofputil_msg_type *type;
064af421
BP
4275 int error;
4276
d1e2cf21
BP
4277 error = ofputil_decode_msg_type(oh, &type);
4278 if (error) {
4279 return error;
4280 }
064af421 4281
d1e2cf21
BP
4282 switch (ofputil_msg_type_code(type)) {
4283 /* OpenFlow requests. */
4284 case OFPUTIL_OFPT_ECHO_REQUEST:
4285 return handle_echo_request(ofconn, oh);
064af421 4286
d1e2cf21
BP
4287 case OFPUTIL_OFPT_FEATURES_REQUEST:
4288 return handle_features_request(ofconn, oh);
064af421 4289
d1e2cf21
BP
4290 case OFPUTIL_OFPT_GET_CONFIG_REQUEST:
4291 return handle_get_config_request(ofconn, oh);
064af421 4292
d1e2cf21
BP
4293 case OFPUTIL_OFPT_SET_CONFIG:
4294 return handle_set_config(ofconn, msg->data);
064af421 4295
d1e2cf21
BP
4296 case OFPUTIL_OFPT_PACKET_OUT:
4297 return handle_packet_out(ofconn, oh);
064af421 4298
d1e2cf21
BP
4299 case OFPUTIL_OFPT_PORT_MOD:
4300 return handle_port_mod(ofconn, oh);
064af421 4301
d1e2cf21 4302 case OFPUTIL_OFPT_FLOW_MOD:
2e4f5fcf 4303 return handle_flow_mod(ofconn, oh);
064af421 4304
d1e2cf21
BP
4305 case OFPUTIL_OFPT_BARRIER_REQUEST:
4306 return handle_barrier_request(ofconn, oh);
064af421 4307
d1e2cf21
BP
4308 /* OpenFlow replies. */
4309 case OFPUTIL_OFPT_ECHO_REPLY:
4310 return 0;
246e61ea 4311
d1e2cf21
BP
4312 /* Nicira extension requests. */
4313 case OFPUTIL_NXT_STATUS_REQUEST:
4314 return switch_status_handle_request(
4315 ofconn->ofproto->switch_status, ofconn->rconn, oh);
4316
4317 case OFPUTIL_NXT_TUN_ID_FROM_COOKIE:
4318 return handle_tun_id_from_cookie(ofconn, oh);
4319
4320 case OFPUTIL_NXT_ROLE_REQUEST:
4321 return handle_role_request(ofconn, oh);
4322
4323 case OFPUTIL_NXT_SET_FLOW_FORMAT:
4324 return handle_nxt_set_flow_format(ofconn, oh);
4325
4326 case OFPUTIL_NXT_FLOW_MOD:
2e4f5fcf 4327 return handle_flow_mod(ofconn, oh);
d1e2cf21
BP
4328
4329 /* OpenFlow statistics requests. */
4330 case OFPUTIL_OFPST_DESC_REQUEST:
4331 return handle_desc_stats_request(ofconn, oh);
4332
4333 case OFPUTIL_OFPST_FLOW_REQUEST:
4334 return handle_flow_stats_request(ofconn, oh);
4335
4336 case OFPUTIL_OFPST_AGGREGATE_REQUEST:
4337 return handle_aggregate_stats_request(ofconn, oh);
4338
4339 case OFPUTIL_OFPST_TABLE_REQUEST:
4340 return handle_table_stats_request(ofconn, oh);
4341
4342 case OFPUTIL_OFPST_PORT_REQUEST:
4343 return handle_port_stats_request(ofconn, oh);
4344
4345 case OFPUTIL_OFPST_QUEUE_REQUEST:
4346 return handle_queue_stats_request(ofconn, oh);
4347
4348 /* Nicira extension statistics requests. */
4349 case OFPUTIL_NXST_FLOW_REQUEST:
4350 return handle_nxst_flow(ofconn, oh);
4351
4352 case OFPUTIL_NXST_AGGREGATE_REQUEST:
4353 return handle_nxst_aggregate(ofconn, oh);
4354
4355 case OFPUTIL_INVALID:
4356 case OFPUTIL_OFPT_HELLO:
4357 case OFPUTIL_OFPT_ERROR:
4358 case OFPUTIL_OFPT_FEATURES_REPLY:
4359 case OFPUTIL_OFPT_GET_CONFIG_REPLY:
4360 case OFPUTIL_OFPT_PACKET_IN:
4361 case OFPUTIL_OFPT_FLOW_REMOVED:
4362 case OFPUTIL_OFPT_PORT_STATUS:
4363 case OFPUTIL_OFPT_BARRIER_REPLY:
4364 case OFPUTIL_OFPT_QUEUE_GET_CONFIG_REQUEST:
4365 case OFPUTIL_OFPT_QUEUE_GET_CONFIG_REPLY:
4366 case OFPUTIL_OFPST_DESC_REPLY:
4367 case OFPUTIL_OFPST_FLOW_REPLY:
4368 case OFPUTIL_OFPST_QUEUE_REPLY:
4369 case OFPUTIL_OFPST_PORT_REPLY:
4370 case OFPUTIL_OFPST_TABLE_REPLY:
4371 case OFPUTIL_OFPST_AGGREGATE_REPLY:
4372 case OFPUTIL_NXT_STATUS_REPLY:
4373 case OFPUTIL_NXT_ROLE_REPLY:
4374 case OFPUTIL_NXT_FLOW_REMOVED:
4375 case OFPUTIL_NXST_FLOW_REPLY:
4376 case OFPUTIL_NXST_AGGREGATE_REPLY:
064af421
BP
4377 default:
4378 if (VLOG_IS_WARN_ENABLED()) {
4379 char *s = ofp_to_string(oh, ntohs(oh->length), 2);
4380 VLOG_WARN_RL(&rl, "OpenFlow message ignored: %s", s);
4381 free(s);
4382 }
d1e2cf21
BP
4383 if (oh->type == OFPT_STATS_REQUEST || oh->type == OFPT_STATS_REPLY) {
4384 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_STAT);
4385 } else {
4386 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE);
4387 }
064af421 4388 }
d1e2cf21 4389}
064af421 4390
d1e2cf21
BP
4391static void
4392handle_openflow(struct ofconn *ofconn, struct ofpbuf *ofp_msg)
4393{
4394 int error = handle_openflow__(ofconn, ofp_msg);
064af421
BP
4395 if (error) {
4396 send_error_oh(ofconn, ofp_msg->data, error);
4397 }
d1e2cf21 4398 COVERAGE_INC(ofproto_recv_openflow);
064af421
BP
4399}
4400\f
4401static void
856081f6 4402handle_miss_upcall(struct ofproto *p, struct dpif_upcall *upcall)
064af421 4403{
bcf84111 4404 struct facet *facet;
ae412e7d 4405 struct flow flow;
064af421 4406
856081f6
BP
4407 /* Obtain in_port and tun_id, at least. */
4408 odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
064af421 4409
856081f6
BP
4410 /* Set header pointers in 'flow'. */
4411 flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow);
4617e2c1 4412
0ad9b732
JP
4413 /* Check with in-band control to see if this packet should be sent
4414 * to the local port regardless of the flow table. */
856081f6 4415 if (in_band_msg_in_hook(p->in_band, &flow, upcall->packet)) {
cdee00fd 4416 struct ofpbuf odp_actions;
0ad9b732 4417
cdee00fd
BP
4418 ofpbuf_init(&odp_actions, 32);
4419 nl_msg_put_u32(&odp_actions, ODPAT_OUTPUT, ODPP_LOCAL);
856081f6
BP
4420 dpif_execute(p->dpif, odp_actions.data, odp_actions.size,
4421 upcall->packet);
cdee00fd 4422 ofpbuf_uninit(&odp_actions);
0ad9b732
JP
4423 }
4424
bcf84111
BP
4425 facet = facet_lookup_valid(p, &flow);
4426 if (!facet) {
4427 struct rule *rule = rule_lookup(p, &flow);
4428 if (!rule) {
4429 /* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
856081f6 4430 struct ofport *port = get_port(p, flow.in_port);
bcf84111
BP
4431 if (port) {
4432 if (port->opp.config & OFPPC_NO_PACKET_IN) {
4433 COVERAGE_INC(ofproto_no_packet_in);
4434 /* XXX install 'drop' flow entry */
856081f6 4435 ofpbuf_delete(upcall->packet);
bcf84111
BP
4436 return;
4437 }
4438 } else {
4439 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
856081f6 4440 flow.in_port);
064af421 4441 }
064af421 4442
bcf84111 4443 COVERAGE_INC(ofproto_packet_in);
856081f6 4444 send_packet_in(p, upcall, &flow, false);
bcf84111 4445 return;
064af421 4446 }
bcf84111 4447
856081f6 4448 facet = facet_create(p, rule, &flow, upcall->packet);
bcf84111
BP
4449 } else if (!facet->may_install) {
4450 /* The facet is not installable, that is, we need to process every
4451 * packet, so process the current packet's actions into 'facet'. */
856081f6 4452 facet_make_actions(p, facet, upcall->packet);
064af421
BP
4453 }
4454
bcf84111 4455 if (facet->rule->cr.priority == FAIL_OPEN_PRIORITY) {
7778bd15
BP
4456 /*
4457 * Extra-special case for fail-open mode.
4458 *
4459 * We are in fail-open mode and the packet matched the fail-open rule,
4460 * but we are connected to a controller too. We should send the packet
4461 * up to the controller in the hope that it will try to set up a flow
4462 * and thereby allow us to exit fail-open.
4463 *
4464 * See the top-level comment in fail-open.c for more information.
4465 */
856081f6 4466 send_packet_in(p, upcall, &flow, true);
7778bd15 4467 }
750638bb 4468
856081f6 4469 facet_execute(p, facet, upcall->packet);
bcf84111 4470 facet_install(p, facet, false);
064af421 4471}
72b06300
BP
4472
4473static void
856081f6 4474handle_upcall(struct ofproto *p, struct dpif_upcall *upcall)
72b06300 4475{
856081f6 4476 struct flow flow;
72b06300 4477
856081f6 4478 switch (upcall->type) {
72b06300
BP
4479 case _ODPL_ACTION_NR:
4480 COVERAGE_INC(ofproto_ctlr_action);
856081f6
BP
4481 odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
4482 send_packet_in(p, upcall, &flow, false);
72b06300
BP
4483 break;
4484
4485 case _ODPL_SFLOW_NR:
4486 if (p->sflow) {
856081f6
BP
4487 odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow);
4488 ofproto_sflow_received(p->sflow, upcall, &flow);
72b06300 4489 }
856081f6 4490 ofpbuf_delete(upcall->packet);
72b06300
BP
4491 break;
4492
4493 case _ODPL_MISS_NR:
856081f6 4494 handle_miss_upcall(p, upcall);
72b06300
BP
4495 break;
4496
4497 default:
856081f6 4498 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
72b06300
BP
4499 break;
4500 }
4501}
064af421 4502\f
4a4cdb3b
BP
4503/* Flow expiration. */
4504
0de7a4b4 4505static int ofproto_dp_max_idle(const struct ofproto *);
4a4cdb3b 4506static void ofproto_update_used(struct ofproto *);
5ecc9d81 4507static void rule_expire(struct ofproto *, struct rule *);
bcf84111 4508static void ofproto_expire_facets(struct ofproto *, int dp_max_idle);
4a4cdb3b
BP
4509
4510/* This function is called periodically by ofproto_run(). Its job is to
4511 * collect updates for the flows that have been installed into the datapath,
4512 * most importantly when they last were used, and then use that information to
0de7a4b4
BP
4513 * expire flows that have not been used recently.
4514 *
4515 * Returns the number of milliseconds after which it should be called again. */
4516static int
4a4cdb3b
BP
4517ofproto_expire(struct ofproto *ofproto)
4518{
5ecc9d81
BP
4519 struct rule *rule, *next_rule;
4520 struct cls_cursor cursor;
4521 int dp_max_idle;
4a4cdb3b
BP
4522
4523 /* Update 'used' for each flow in the datapath. */
4524 ofproto_update_used(ofproto);
4525
bcf84111 4526 /* Expire facets that have been idle too long. */
5ecc9d81
BP
4527 dp_max_idle = ofproto_dp_max_idle(ofproto);
4528 ofproto_expire_facets(ofproto, dp_max_idle);
bcf84111
BP
4529
4530 /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
5ecc9d81
BP
4531 cls_cursor_init(&cursor, &ofproto->cls, NULL);
4532 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
4533 rule_expire(ofproto, rule);
4534 }
4a4cdb3b
BP
4535
4536 /* Let the hook know that we're at a stable point: all outstanding data
4537 * in existing flows has been accounted to the account_cb. Thus, the
4538 * hook can now reasonably do operations that depend on having accurate
4539 * flow volume accounting (currently, that's just bond rebalancing). */
4540 if (ofproto->ofhooks->account_checkpoint_cb) {
4541 ofproto->ofhooks->account_checkpoint_cb(ofproto->aux);
4542 }
0de7a4b4 4543
5ecc9d81 4544 return MIN(dp_max_idle, 1000);
4a4cdb3b
BP
4545}
4546
bcf84111 4547/* Update 'used' member of installed facets. */
4a4cdb3b
BP
4548static void
4549ofproto_update_used(struct ofproto *p)
4550{
704a1e09 4551 struct dpif_flow_dump dump;
4a4cdb3b 4552
704a1e09 4553 dpif_flow_dump_start(&dump, p->dpif);
36956a7d
BP
4554 for (;;) {
4555 uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S];
bcf84111 4556 struct facet *facet;
36956a7d 4557 struct odp_flow f;
ae412e7d 4558 struct flow flow;
14608a15 4559
36956a7d
BP
4560 memset(&f, 0, sizeof f);
4561 f.key = (struct nlattr *) keybuf;
4562 f.key_len = sizeof keybuf;
4563 if (!dpif_flow_dump_next(&dump, &f)) {
4564 break;
4565 }
4566
4567 if (f.key_len > sizeof keybuf) {
4568 VLOG_WARN_RL(&rl, "ODP flow key overflowed buffer");
4569 continue;
4570 }
4571 if (odp_flow_key_to_flow(f.key, f.key_len, &flow)) {
4572 struct ds s;
4573
4574 ds_init(&s);
4575 odp_flow_key_format(f.key, f.key_len, &s);
4576 VLOG_WARN_RL(&rl, "failed to convert ODP flow key to flow: %s",
4577 ds_cstr(&s));
4578 ds_destroy(&s);
4579
4580 continue;
4581 }
bcf84111 4582 facet = facet_find(p, &flow);
4a4cdb3b 4583
bcf84111 4584 if (facet && facet->installed) {
704a1e09
BP
4585 facet_update_time(p, facet, &f.stats);
4586 facet_account(p, facet, f.stats.n_bytes);
4a4cdb3b
BP
4587 } else {
4588 /* There's a flow in the datapath that we know nothing about.
4589 * Delete it. */
4590 COVERAGE_INC(ofproto_unexpected_rule);
704a1e09 4591 dpif_flow_del(p->dpif, &f);
4a4cdb3b 4592 }
4a4cdb3b 4593 }
704a1e09 4594 dpif_flow_dump_done(&dump);
4a4cdb3b
BP
4595}
4596
0de7a4b4 4597/* Calculates and returns the number of milliseconds of idle time after which
bcf84111 4598 * facets should expire from the datapath and we should fold their statistics
0de7a4b4
BP
4599 * into their parent rules in userspace. */
4600static int
4601ofproto_dp_max_idle(const struct ofproto *ofproto)
4602{
4603 /*
4604 * Idle time histogram.
4605 *
bcf84111 4606 * Most of the time a switch has a relatively small number of facets. When
0de7a4b4
BP
4607 * this is the case we might as well keep statistics for all of them in
4608 * userspace and to cache them in the kernel datapath for performance as
4609 * well.
4610 *
bcf84111 4611 * As the number of facets increases, the memory required to maintain
0de7a4b4 4612 * statistics about them in userspace and in the kernel becomes
bcf84111 4613 * significant. However, with a large number of facets it is likely that
0de7a4b4
BP
4614 * only a few of them are "heavy hitters" that consume a large amount of
4615 * bandwidth. At this point, only heavy hitters are worth caching in the
bcf84111 4616 * kernel and maintaining in userspace; other facets we can discard.
0de7a4b4
BP
4617 *
4618 * The technique used to compute the idle time is to build a histogram with
bcf84111
BP
4619 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each facet
4620 * that is installed in the kernel gets dropped in the appropriate bucket.
0de7a4b4 4621 * After the histogram has been built, we compute the cutoff so that only
bcf84111
BP
4622 * the most-recently-used 1% of facets (but at least 1000 of them) are kept
4623 * cached. At least the most-recently-used bucket of facets is kept, so
4624 * actually an arbitrary number of facets can be kept in any given
0de7a4b4
BP
4625 * expiration run (though the next run will delete most of those unless
4626 * they receive additional data).
4627 *
bcf84111
BP
4628 * This requires a second pass through the facets, in addition to the pass
4629 * made by ofproto_update_used(), because that function never looks
4630 * at uninstallable facets.
0de7a4b4
BP
4631 */
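    /* A worked example, assuming BUCKET_WIDTH works out to 100 ms (so
     * N_BUCKETS is 50): with 10,000 installed facets, MAX(1000, total / 100)
     * is 1000, so roughly the 1,000 most-recently-used facets survive; with
     * 500,000 facets, about 5,000 (1%) survive.  With 1,000 or fewer facets
     * the function returns the maximum idle time (5000 ms) without building
     * the histogram at all. */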
4632 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
4633 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
4634 int buckets[N_BUCKETS] = { 0 };
bcf84111 4635 struct facet *facet;
0de7a4b4 4636 int total, bucket;
0de7a4b4
BP
4637 long long int now;
4638 int i;
4639
bcf84111 4640 total = hmap_count(&ofproto->facets);
0de7a4b4
BP
4641 if (total <= 1000) {
4642 return N_BUCKETS * BUCKET_WIDTH;
4643 }
4644
4645 /* Build histogram. */
4646 now = time_msec();
bcf84111
BP
4647 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
4648 long long int idle = now - facet->used;
0de7a4b4
BP
4649 int bucket = (idle <= 0 ? 0
4650 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
4651 : (unsigned int) idle / BUCKET_WIDTH);
4652 buckets[bucket]++;
4653 }
4654
4655 /* Find the first bucket whose flows should be expired. */
4656 for (bucket = 0; bucket < N_BUCKETS; bucket++) {
4657 if (buckets[bucket]) {
4658 int subtotal = 0;
4659 do {
4660 subtotal += buckets[bucket++];
4661 } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
4662 break;
4663 }
4664 }
4665
4666 if (VLOG_IS_DBG_ENABLED()) {
4667 struct ds s;
4668
4669 ds_init(&s);
4670 ds_put_cstr(&s, "keep");
4671 for (i = 0; i < N_BUCKETS; i++) {
4672 if (i == bucket) {
4673 ds_put_cstr(&s, ", drop");
4674 }
4675 if (buckets[i]) {
4676 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
4677 }
4678 }
4679 VLOG_INFO("%s: %s (msec:count)",
4680 dpif_name(ofproto->dpif), ds_cstr(&s));
4681 ds_destroy(&s);
4682 }
4683
4684 return bucket * BUCKET_WIDTH;
4685}
4686
4a4cdb3b 4687static void
bcf84111 4688facet_active_timeout(struct ofproto *ofproto, struct facet *facet)
4a4cdb3b 4689{
bcf84111
BP
4690 if (ofproto->netflow && !facet_is_controller_flow(facet) &&
4691 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
4a4cdb3b
BP
4692 struct ofexpired expired;
4693 struct odp_flow odp_flow;
4694
4695 /* Get updated flow stats.
4696 *
4697 * XXX We could avoid this call entirely if (1) ofproto_update_used()
4698 * updated TCP flags and (2) the flow dump loop in
4699 * ofproto_update_used() zeroed TCP flags. */
4700 memset(&odp_flow, 0, sizeof odp_flow);
bcf84111 4701 if (facet->installed) {
36956a7d
BP
4702 uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S];
4703 struct ofpbuf key;
4704
4705 ofpbuf_use_stack(&key, keybuf, sizeof keybuf);
4706 odp_flow_key_from_flow(&key, &facet->flow);
4707
4708 odp_flow.key = key.data;
4709 odp_flow.key_len = key.size;
4a4cdb3b
BP
4710 odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
4711 dpif_flow_get(ofproto->dpif, &odp_flow);
4712
4713 if (odp_flow.stats.n_packets) {
bcf84111
BP
4714 facet_update_time(ofproto, facet, &odp_flow.stats);
4715 netflow_flow_update_flags(&facet->nf_flow,
4a4cdb3b
BP
4716 odp_flow.stats.tcp_flags);
4717 }
4718 }
4719
bcf84111
BP
4720 expired.flow = facet->flow;
4721 expired.packet_count = facet->packet_count +
4a4cdb3b 4722 odp_flow.stats.n_packets;
bcf84111
BP
4723 expired.byte_count = facet->byte_count + odp_flow.stats.n_bytes;
4724 expired.used = facet->used;
4725
4726 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
4727 }
4728}
4a4cdb3b 4729
bcf84111
BP
4730static void
4731ofproto_expire_facets(struct ofproto *ofproto, int dp_max_idle)
4732{
4733 long long int cutoff = time_msec() - dp_max_idle;
4734 struct facet *facet, *next_facet;
4735
4736 HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
4737 facet_active_timeout(ofproto, facet);
4738 if (facet->used < cutoff) {
4739 facet_remove(ofproto, facet);
4740 }
4a4cdb3b
BP
4741 }
4742}
4743
5ecc9d81
BP
4744/* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
4745 * then delete it entirely. */
4a4cdb3b 4746static void
5ecc9d81 4747rule_expire(struct ofproto *ofproto, struct rule *rule)
4a4cdb3b 4748{
bcf84111
BP
4749 struct facet *facet, *next_facet;
4750 long long int now;
4751 uint8_t reason;
4a4cdb3b 4752
bcf84111 4753 /* Has 'rule' expired? */
4a4cdb3b 4754 now = time_msec();
bcf84111
BP
4755 if (rule->hard_timeout
4756 && now > rule->created + rule->hard_timeout * 1000) {
4757 reason = OFPRR_HARD_TIMEOUT;
4758 } else if (rule->idle_timeout && list_is_empty(&rule->facets)
4759 && now > rule->used + rule->idle_timeout * 1000) {
4760 reason = OFPRR_IDLE_TIMEOUT;
4a4cdb3b 4761 } else {
bcf84111 4762 return;
4a4cdb3b 4763 }
064af421 4764
bcf84111 4765 COVERAGE_INC(ofproto_expired);
064af421 4766
bcf84111
BP
4767 /* Update stats. (This is a no-op if the rule expired due to an idle
4768 * timeout, because that only happens when the rule has no facets left.) */
4769 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
5ecc9d81 4770 facet_remove(ofproto, facet);
064af421
BP
4771 }
4772
bcf84111
BP
4773 /* Get rid of the rule. */
4774 if (!rule_is_hidden(rule)) {
5ecc9d81 4775 rule_send_removed(ofproto, rule, reason);
bcf84111 4776 }
5ecc9d81 4777 rule_remove(ofproto, rule);
064af421 4778}
bcf84111 4779\f
064af421 4780static struct ofpbuf *
09246b99
BP
4781compose_ofp_flow_removed(struct ofconn *ofconn, const struct rule *rule,
4782 uint8_t reason)
064af421 4783{
ca069229 4784 struct ofp_flow_removed *ofr;
064af421
BP
4785 struct ofpbuf *buf;
4786
0224fbde 4787 ofr = make_openflow_xid(sizeof *ofr, OFPT_FLOW_REMOVED, htonl(0), &buf);
ff9d3826
BP
4788 ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofr->match,
4789 rule->flow_cookie, &ofr->cookie);
ca069229
JP
4790 ofr->priority = htons(rule->cr.priority);
4791 ofr->reason = reason;
c6ebb8fb 4792 calc_flow_duration(rule->created, &ofr->duration_sec, &ofr->duration_nsec);
ca069229
JP
4793 ofr->idle_timeout = htons(rule->idle_timeout);
4794 ofr->packet_count = htonll(rule->packet_count);
4795 ofr->byte_count = htonll(rule->byte_count);
064af421
BP
4796
4797 return buf;
4798}
4799
09246b99
BP
4800static struct ofpbuf *
4801compose_nx_flow_removed(const struct rule *rule, uint8_t reason)
4802{
4803 struct nx_flow_removed *nfr;
4804 struct ofpbuf *buf;
4805 int match_len;
4806
0224fbde 4807 make_nxmsg_xid(sizeof *nfr, NXT_FLOW_REMOVED, htonl(0), &buf);
09246b99 4808 match_len = nx_put_match(buf, &rule->cr);
0224fbde 4809
4d0ed519 4810 nfr = buf->data;
09246b99
BP
4811 nfr->cookie = rule->flow_cookie;
4812 nfr->priority = htons(rule->cr.priority);
4813 nfr->reason = reason;
4814 calc_flow_duration(rule->created, &nfr->duration_sec, &nfr->duration_nsec);
4815 nfr->idle_timeout = htons(rule->idle_timeout);
4816 nfr->match_len = htons(match_len);
4817 nfr->packet_count = htonll(rule->packet_count);
4818 nfr->byte_count = htonll(rule->byte_count);
4819
4820 return buf;
4821}
4822
ca069229 4823static void
bcf84111 4824rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason)
064af421
BP
4825{
4826 struct ofconn *ofconn;
064af421 4827
3b587616
BP
4828 if (!rule->send_flow_removed) {
4829 return;
4830 }
4831
4e8e4213 4832 LIST_FOR_EACH (ofconn, node, &p->all_conns) {
7a0efeb5
BP
4833 struct ofpbuf *msg;
4834
4835 if (!rconn_is_connected(ofconn->rconn)
4836 || !ofconn_receives_async_msgs(ofconn)) {
4837 continue;
064af421 4838 }
7a0efeb5 4839
09246b99
BP
4840 msg = (ofconn->flow_format == NXFF_NXM
4841 ? compose_nx_flow_removed(rule, reason)
4842 : compose_ofp_flow_removed(ofconn, rule, reason));
6d6c7259
BP
4843
4844 /* Account flow expirations under ofconn->reply_counter, the counter
4845 * for replies to OpenFlow requests. That works because preventing
4846 * OpenFlow requests from being processed also prevents new flows from
4847 * being added (and expiring). (It also prevents processing OpenFlow
4848 * requests that would not add new flows, so it is imperfect.) */
7a0efeb5 4849 queue_tx(msg, ofconn, ofconn->reply_counter);
064af421
BP
4850 }
4851}
4852
856081f6 4853/* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */
064af421 4854static void
856081f6 4855do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_)
064af421 4856{
76ce9432 4857 struct ofconn *ofconn = ofconn_;
43253595 4858
856081f6 4859 rconn_send_with_limit(ofconn->rconn, ofp_packet_in,
43253595
BP
4860 ofconn->packet_in_counter, 100);
4861}
4862
856081f6
BP
4863/* Takes 'upcall', whose packet has the flow specified by 'flow', composes an
4864 * OpenFlow packet-in message from it, and passes it to 'ofconn''s packet
4865 * scheduler for sending.
43253595 4866 *
856081f6
BP
4867 * If 'clone' is true, the caller retains ownership of 'upcall->packet'.
4868 * Otherwise, ownership is transferred to this function. */
43253595 4869static void
856081f6
BP
4870schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall,
4871 const struct flow *flow, bool clone)
43253595 4872{
856081f6 4873 enum { OPI_SIZE = offsetof(struct ofp_packet_in, data) };
76ce9432 4874 struct ofproto *ofproto = ofconn->ofproto;
856081f6
BP
4875 struct ofp_packet_in *opi;
4876 int total_len, send_len;
4877 struct ofpbuf *packet;
76ce9432 4878 uint32_t buffer_id;
064af421 4879
856081f6
BP
4880 /* Get OpenFlow buffer_id. */
4881 if (upcall->type == _ODPL_ACTION_NR) {
76ce9432 4882 buffer_id = UINT32_MAX;
43253595
BP
4883 } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) {
4884 buffer_id = pktbuf_get_null();
89b9612d
BP
4885 } else if (!ofconn->pktbuf) {
4886 buffer_id = UINT32_MAX;
76ce9432 4887 } else {
856081f6 4888 buffer_id = pktbuf_save(ofconn->pktbuf, upcall->packet, flow->in_port);
76ce9432 4889 }
372179d4 4890
43253595 4891 /* Figure out how much of the packet to send. */
856081f6 4892 total_len = send_len = upcall->packet->size;
43253595
BP
4893 if (buffer_id != UINT32_MAX) {
4894 send_len = MIN(send_len, ofconn->miss_send_len);
4895 }
856081f6
BP
4896 if (upcall->type == _ODPL_ACTION_NR) {
4897 send_len = MIN(send_len, upcall->userdata);
4898 }
064af421 4899
856081f6 4900 /* Copy or steal buffer for OFPT_PACKET_IN. */
43253595 4901 if (clone) {
856081f6
BP
4902 packet = ofpbuf_clone_data_with_headroom(upcall->packet->data,
4903 send_len, OPI_SIZE);
43253595 4904 } else {
856081f6
BP
4905 packet = upcall->packet;
4906 packet->size = send_len;
43253595
BP
4907 }
4908
856081f6
BP
4909 /* Add OFPT_PACKET_IN. */
4910 opi = ofpbuf_push_zeros(packet, OPI_SIZE);
4911 opi->header.version = OFP_VERSION;
4912 opi->header.type = OFPT_PACKET_IN;
4913 opi->total_len = htons(total_len);
4914 opi->in_port = htons(odp_port_to_ofp_port(flow->in_port));
4915 opi->reason = upcall->type == _ODPL_MISS_NR ? OFPR_NO_MATCH : OFPR_ACTION;
43253595
BP
4916 opi->buffer_id = htonl(buffer_id);
4917 update_openflow_length(packet);
4918
4919 /* Hand over to packet scheduler. It might immediately call into
4920 * do_send_packet_in() or it might buffer it for a while (until a later
4921 * call to pinsched_run()). */
856081f6 4922 pinsched_send(ofconn->schedulers[opi->reason], flow->in_port,
43253595 4923 packet, do_send_packet_in, ofconn);
064af421
BP
4924}
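
/* Editorial sketch, not part of ofproto.c: the "how much of the packet to
 * send" policy from schedule_packet_in() above, as a standalone function
 * with hypothetical names.  When the packet was buffered, only a prefix up
 * to the connection's miss_send_len goes to the controller (it can fetch the
 * rest via the buffer_id); for action upcalls, the length requested by the
 * controller action ('userdata' above) caps it further. */
#include <stdbool.h>
#include <stdio.h>

static int
example_min(int a, int b)
{
    return a < b ? a : b;
}

static int
example_send_len(int total_len, bool buffered, int miss_send_len,
                 bool action_upcall, int requested_len)
{
    int send_len = total_len;

    if (buffered) {
        send_len = example_min(send_len, miss_send_len);
    }
    if (action_upcall) {
        send_len = example_min(send_len, requested_len);
    }
    return send_len;
}

int
main(void)
{
    printf("%d\n", example_send_len(1514, true, 128, false, 0));  /* 128 */
    printf("%d\n", example_send_len(1514, false, 128, true, 64)); /* 64 */
    return 0;
}
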
4925
856081f6
BP
4926/* Given 'upcall', of type _ODPL_ACTION_NR or _ODPL_MISS_NR, sends an
4927 * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to
4928 * their individual configurations.
43253595
BP
4929 *
4930 * Takes ownership of 'upcall->packet' unless 'clone' is true, in which case the caller retains ownership. */
4931static void
856081f6
BP
4932send_packet_in(struct ofproto *ofproto, struct dpif_upcall *upcall,
4933 const struct flow *flow, bool clone)
43253595 4934{
76ce9432 4935 struct ofconn *ofconn, *prev;
76ce9432
BP
4936
4937 prev = NULL;
4e8e4213 4938 LIST_FOR_EACH (ofconn, node, &ofproto->all_conns) {
c91248b3 4939 if (ofconn_receives_async_msgs(ofconn)) {
9deba63b 4940 if (prev) {
856081f6 4941 schedule_packet_in(prev, upcall, flow, true);
9deba63b
BP
4942 }
4943 prev = ofconn;
064af421 4944 }
76ce9432
BP
4945 }
4946 if (prev) {
856081f6
BP
4947 schedule_packet_in(prev, upcall, flow, clone);
4948 } else if (!clone) {
4949 ofpbuf_delete(upcall->packet);
064af421 4950 }
064af421
BP
4951}
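
/* Editorial sketch, not part of ofproto.c: send_packet_in() above walks the
 * controller list one step behind ("prev"), cloning the upcall for every
 * controller except the last, which takes over the original buffer.  This
 * hypothetical standalone program shows the same pattern with plain strings,
 * so n receivers cost only n - 1 copies (and zero copies when n == 1). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
deliver(const char *who, char *buf)
{
    printf("%s got \"%s\"\n", who, buf);
    free(buf);                            /* Each receiver owns its buffer. */
}

int
main(void)
{
    const char *receivers[] = { "ctl-1", "ctl-2", "ctl-3" };
    const size_t n = sizeof receivers / sizeof *receivers;
    char *buf = strdup("packet payload");
    const char *prev = NULL;
    size_t i;

    for (i = 0; i < n; i++) {
        if (prev) {
            deliver(prev, strdup(buf));   /* Clone for all but the last. */
        }
        prev = receivers[i];
    }
    if (prev) {
        deliver(prev, buf);               /* Last receiver takes the original. */
    } else {
        free(buf);                        /* No receivers: discard. */
    }
    return 0;
}
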
4952
4953static uint64_t
fa60c019 4954pick_datapath_id(const struct ofproto *ofproto)
064af421 4955{
fa60c019 4956 const struct ofport *port;
064af421 4957
ca0f572c 4958 port = get_port(ofproto, ODPP_LOCAL);
fa60c019
BP
4959 if (port) {
4960 uint8_t ea[ETH_ADDR_LEN];
4961 int error;
4962
4963 error = netdev_get_etheraddr(port->netdev, ea);
064af421
BP
4964 if (!error) {
4965 return eth_addr_to_uint64(ea);
4966 }
4967 VLOG_WARN("could not get MAC address for %s (%s)",
fa60c019 4968 netdev_get_name(port->netdev), strerror(error));
064af421 4969 }
fa60c019 4970 return ofproto->fallback_dpid;
064af421
BP
4971}
4972
4973static uint64_t
4974pick_fallback_dpid(void)
4975{
4976 uint8_t ea[ETH_ADDR_LEN];
70150daf 4977 eth_addr_nicira_random(ea);
064af421
BP
4978 return eth_addr_to_uint64(ea);
4979}
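
/* Editorial sketch, not part of ofproto.c: pick_datapath_id() above derives
 * the datapath ID from the local port's MAC address via eth_addr_to_uint64().
 * This hypothetical helper shows the assumed packing: the six address bytes
 * land, in order, in the low 48 bits of the 64-bit ID. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
example_ea_to_u64(const uint8_t ea[6])
{
    uint64_t x = 0;
    int i;

    for (i = 0; i < 6; i++) {
        x = (x << 8) | ea[i];
    }
    return x;
}

int
main(void)
{
    const uint8_t ea[6] = { 0x00, 0x23, 0x20, 0xab, 0xcd, 0xef };

    printf("%016"PRIx64"\n", example_ea_to_u64(ea));  /* 0000002320abcdef */
    return 0;
}
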
4980\f
7aa697dd
BP
4981static void
4982ofproto_unixctl_list(struct unixctl_conn *conn, const char *arg OVS_UNUSED,
4983 void *aux OVS_UNUSED)
4984{
4985 const struct shash_node *node;
4986 struct ds results;
4987
4988 ds_init(&results);
4989 SHASH_FOR_EACH (node, &all_ofprotos) {
4990 ds_put_format(&results, "%s\n", node->name);
4991 }
4992 unixctl_command_reply(conn, 200, ds_cstr(&results));
4993 ds_destroy(&results);
4994}
4995
4996struct ofproto_trace {
4997 struct action_xlate_ctx ctx;
4998 struct flow flow;
4999 struct ds *result;
5000};
5001
5002static void
5003trace_format_rule(struct ds *result, int level, const struct rule *rule)
5004{
5005 ds_put_char_multiple(result, '\t', level);
5006 if (!rule) {
5007 ds_put_cstr(result, "No match\n");
5008 return;
5009 }
5010
5011 ds_put_format(result, "Rule: cookie=%#"PRIx64" ",
5012 ntohll(rule->flow_cookie));
5013 cls_rule_format(&rule->cr, result);
5014 ds_put_char(result, '\n');
5015
5016 ds_put_char_multiple(result, '\t', level);
5017 ds_put_cstr(result, "OpenFlow ");
5018 ofp_print_actions(result, (const struct ofp_action_header *) rule->actions,
5019 rule->n_actions * sizeof *rule->actions);
5020 ds_put_char(result, '\n');
5021}
5022
5023static void
5024trace_format_flow(struct ds *result, int level, const char *title,
5025 struct ofproto_trace *trace)
5026{
5027 ds_put_char_multiple(result, '\t', level);
5028 ds_put_format(result, "%s: ", title);
5029 if (flow_equal(&trace->ctx.flow, &trace->flow)) {
5030 ds_put_cstr(result, "unchanged");
5031 } else {
5032 flow_format(result, &trace->ctx.flow);
5033 trace->flow = trace->ctx.flow;
5034 }
5035 ds_put_char(result, '\n');
5036}
5037
5038static void
5039trace_resubmit(struct action_xlate_ctx *ctx, const struct rule *rule)
5040{
5041 struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx);
5042 struct ds *result = trace->result;
5043
5044 ds_put_char(result, '\n');
5045 trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
5046 trace_format_rule(result, ctx->recurse + 1, rule);
5047}
5048
5049static void
5050ofproto_unixctl_trace(struct unixctl_conn *conn, const char *args_,
5051 void *aux OVS_UNUSED)
5052{
5053 char *dpname, *in_port_s, *tun_id_s, *packet_s;
5054 char *args = xstrdup(args_);
5055 char *save_ptr = NULL;
5056 struct ofproto *ofproto;
5057 struct ofpbuf packet;
5058 struct rule *rule;
5059 struct ds result;
5060 struct flow flow;
5061 uint16_t in_port;
11e6a15b 5062 ovs_be64 tun_id;
7aa697dd
BP
5063 char *s;
5064
5065 ofpbuf_init(&packet, strlen(args) / 2);
5066 ds_init(&result);
5067
5068 dpname = strtok_r(args, " ", &save_ptr);
5069 tun_id_s = strtok_r(NULL, " ", &save_ptr);
5070 in_port_s = strtok_r(NULL, " ", &save_ptr);
5071 packet_s = strtok_r(NULL, "", &save_ptr); /* Get entire rest of line. */
5072 if (!dpname || !in_port_s || !packet_s) {
5073 unixctl_command_reply(conn, 501, "Bad command syntax");
5074 goto exit;
5075 }
5076
5077 ofproto = shash_find_data(&all_ofprotos, dpname);
5078 if (!ofproto) {
5079 unixctl_command_reply(conn, 501, "Unknown ofproto (use ofproto/list "
5080 "for help)");
5081 goto exit;
5082 }
5083
11e6a15b 5084 tun_id = htonll(strtoull(tun_id_s, NULL, 10));
7aa697dd
BP
5085 in_port = ofp_port_to_odp_port(atoi(in_port_s));
5086
5087 packet_s = ofpbuf_put_hex(&packet, packet_s, NULL);
5088 packet_s += strspn(packet_s, " ");
5089 if (*packet_s != '\0') {
5090 unixctl_command_reply(conn, 501, "Trailing garbage in command");
5091 goto exit;
5092 }
5093 if (packet.size < ETH_HEADER_LEN) {
5094 unixctl_command_reply(conn, 501, "Packet data too short for Ethernet");
5095 goto exit;
5096 }
5097
5098 ds_put_cstr(&result, "Packet: ");
5099 s = ofp_packet_to_string(packet.data, packet.size, packet.size);
5100 ds_put_cstr(&result, s);
5101 free(s);
5102
5103 flow_extract(&packet, tun_id, in_port, &flow);
5104 ds_put_cstr(&result, "Flow: ");
5105 flow_format(&result, &flow);
5106 ds_put_char(&result, '\n');
5107
5108 rule = rule_lookup(ofproto, &flow);
5109 trace_format_rule(&result, 0, rule);
5110 if (rule) {
5111 struct ofproto_trace trace;
cdee00fd 5112 struct ofpbuf *odp_actions;
7aa697dd
BP
5113
5114 trace.result = &result;
5115 trace.flow = flow;
5116 action_xlate_ctx_init(&trace.ctx, ofproto, &flow, &packet);
5117 trace.ctx.resubmit_hook = trace_resubmit;
cdee00fd
BP
5118 odp_actions = xlate_actions(&trace.ctx,
5119 rule->actions, rule->n_actions);
7aa697dd
BP
5120
5121 ds_put_char(&result, '\n');
5122 trace_format_flow(&result, 0, "Final flow", &trace);
5123 ds_put_cstr(&result, "Datapath actions: ");
cdee00fd
BP
5124 format_odp_actions(&result, odp_actions->data, odp_actions->size);
5125 ofpbuf_delete(odp_actions);
7aa697dd
BP
5126 }
5127
5128 unixctl_command_reply(conn, 200, ds_cstr(&result));
5129
5130exit:
5131 ds_destroy(&result);
5132 ofpbuf_uninit(&packet);
5133 free(args);
5134}
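
/* Editorial note, not part of ofproto.c: judging only from the parsing above,
 * "ofproto/trace" takes "<dpname> <tun_id> <in_port> <packet>", where
 * <tun_id> is decimal, <in_port> is an OpenFlow port number, and <packet> is
 * the hex bytes of a complete Ethernet frame.  A hypothetical invocation
 * (bridge and port number made up for illustration):
 *
 *     ovs-appctl ofproto/trace br0 0 1 <hex bytes...>
 *
 * Trailing non-hex text, or a frame shorter than ETH_HEADER_LEN, makes the
 * command reply with an error instead of a trace. */
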
5135
5136static void
5137ofproto_unixctl_init(void)
5138{
5139 static bool registered;
5140 if (registered) {
5141 return;
5142 }
5143 registered = true;
5144
5145 unixctl_command_register("ofproto/list", ofproto_unixctl_list, NULL);
5146 unixctl_command_register("ofproto/trace", ofproto_unixctl_trace, NULL);
5147}
5148\f
064af421 5149static bool
ae412e7d 5150default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet,
cdee00fd 5151 struct ofpbuf *odp_actions, tag_type *tags,
6a07af36 5152 uint16_t *nf_output_iface, void *ofproto_)
064af421
BP
5153{
5154 struct ofproto *ofproto = ofproto_;
5155 int out_port;
5156
5157 /* Drop frames for reserved multicast addresses. */
5158 if (eth_addr_is_reserved(flow->dl_dst)) {
5159 return true;
5160 }
5161
5162 /* Learn source MAC (but don't try to learn from revalidation). */
5163 if (packet != NULL) {
5164 tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
7febb910
JG
5165 0, flow->in_port,
5166 GRAT_ARP_LOCK_NONE);
064af421
BP
5167 if (rev_tag) {
5168 /* The log messages here could actually be useful in debugging,
5169 * so keep the rate limit relatively high. */
5170 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
5171 VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
5172 ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
5173 ofproto_revalidate(ofproto, rev_tag);
5174 }
5175 }
5176
5177 /* Determine output port. */
7febb910
JG
5178 out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags,
5179 NULL);
064af421 5180 if (out_port < 0) {
f1588b1f 5181 flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD,
cdee00fd 5182 nf_output_iface, odp_actions);
064af421 5183 } else if (out_port != flow->in_port) {
cdee00fd 5184 nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, out_port);
6a07af36 5185 *nf_output_iface = out_port;
064af421
BP
5186 } else {
5187 /* Drop. */
5188 }
5189
5190 return true;
5191}
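
/* Editorial sketch, not part of ofproto.c: the forwarding decision at the
 * end of default_normal_ofhook_cb() above, restated over a hypothetical
 * learning-table lookup result.  Reserved-multicast dropping and the MAC
 * learning step itself are omitted; only the output-port choice is shown. */
#include <stdio.h>

enum example_verdict { EX_DROP, EX_FLOOD, EX_OUTPUT };

/* 'learned_port' is where the destination MAC was learned, or -1 if unknown;
 * 'in_port' is where the frame arrived. */
static enum example_verdict
example_l2_decide(int learned_port, int in_port, int *out_port)
{
    if (learned_port < 0) {
        return EX_FLOOD;              /* Unknown destination: flood. */
    } else if (learned_port != in_port) {
        *out_port = learned_port;     /* Known destination on another port. */
        return EX_OUTPUT;
    } else {
        return EX_DROP;               /* Destination is behind in_port. */
    }
}

int
main(void)
{
    int out_port = -1;

    printf("%d\n", example_l2_decide(-1, 3, &out_port));  /* 1 (EX_FLOOD) */
    printf("%d\n", example_l2_decide(5, 3, &out_port));   /* 2 (EX_OUTPUT) */
    printf("%d\n", example_l2_decide(3, 3, &out_port));   /* 0 (EX_DROP) */
    return 0;
}
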
5192
5193static const struct ofhooks default_ofhooks = {
064af421
BP
5194 default_normal_ofhook_cb,
5195 NULL,
5196 NULL
5197};