/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "ofproto/ofproto-provider.h"

#include <errno.h>

#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "hmapx.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "simap.h"
#include "smap.h"
#include "timer.h"
#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);

/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 64

/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);

struct ofport_dpif;
struct ofproto_dpif;
struct flow_miss;
struct facet;

struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
};
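
/* Example of the accounting above (illustrative): if a rule's only facet has
 * a datapath flow that has forwarded 10 packets, the rule's own
 * 'packet_count' stays 0 until the facet is destroyed or its statistics are
 * folded in.  Code that wants a live total must therefore add the rule's
 * counters to those of its facets (and their subfacets). */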

static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
}

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
                                            const struct flow *,
                                            uint8_t table);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
                                             const struct flow *flow);

static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static void flow_push_stats(struct facet *, const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);

#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
struct ofmirror {
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    /* Counters. */
    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */
};
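
/* Each ofmirror is represented by a single bit of a mirror_mask_t, keyed by
 * its position in the owning ofproto's "mirrors" array.  Illustratively:
 *
 *     mirror_mask_t bit = MIRROR_MASK_C(1) << mirror->idx;
 *
 * The mask members here and in struct ofbundle are unions of such bits. */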

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);

struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(const struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);

/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .name = "OFPP_NONE",
    .vlan_mode = PORT_VLAN_TRUNK
};

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);

struct action_xlate_ctx {
/* action_xlate_ctx_init() initializes these members. */

    /* The ofproto. */
    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */
    struct flow flow;

    /* Stack for the push and pop actions.
     * Each stack element is of type "union mf_subvalue". */
    struct ofpbuf stack;
    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed,
     * but not if we are just revalidating. */
    bool may_learn;

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;

    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    uint8_t tcp_flags;

    /* If nonnull, flow translation calls this function just before executing
     * a resubmit or OFPP_TABLE action.  In addition, disables logging of
     * traces when the recursion depth is exceeded.
     *
     * 'rule' is the rule being submitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);

    /* If nonnull, flow translation calls this function to report some
     * significant decision, e.g. to explain why OFPP_NORMAL translation
     * dropped a packet. */
    void (*report_hook)(struct action_xlate_ctx *, const char *s);

    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    const struct dpif_flow_stats *resubmit_stats;

/* xlate_actions() initializes and uses these members.  The client might want
 * to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    enum slow_path_reason slow; /* 0 if fast path may be used. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

/* xlate_actions() initializes and uses these members, but the client has no
 * reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint32_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
};

/* Initial values of fields of the packet that may be changed during
 * flow processing and needed later. */
struct initial_vals {
    /* This is the value of vlan_tci in the packet as actually received from
     * dpif.  This is the same as the facet's flow.vlan_tci unless the packet
     * was received via a VLAN splinter.  In that case, this value is 0
     * (because the packet as actually received from the dpif had no 802.1Q
     * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
     * represents.
     *
     * This member should be removed when the VLAN splinters feature is no
     * longer needed. */
    ovs_be16 vlan_tci;

    /* If received on a tunnel, the IP TOS value of the tunnel. */
    uint8_t tunnel_ip_tos;
};

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  const struct initial_vals *initial_vals,
                                  struct rule_dpif *,
                                  uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
                          const struct ofpact *ofpacts, size_t ofpacts_len,
                          struct ofpbuf *odp_actions);
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
                                           const struct ofpact *ofpacts,
                                           size_t ofpacts_len);
static void xlate_table_action(struct action_xlate_ctx *, uint16_t in_port,
                               uint8_t table_id, bool may_packet_in);

static size_t put_userspace_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *,
                                   const union user_action_cookie *);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

static void xlate_report(struct action_xlate_ctx *ctx, const char *s);

/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet
 *     through ofproto_dpif is installed in the datapath.
 */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};
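
/* Typical lifecycle (illustrative): a subfacet begins in SF_NOT_INSTALLED,
 * moves to SF_FAST_PATH when subfacet_install() succeeds with full actions,
 * drops to SF_SLOW_PATH when translation reports a nonzero slow_path_reason
 * (see subfacet_want_path()), and returns to SF_NOT_INSTALLED via
 * subfacet_uninstall(). */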

static const char *subfacet_path_to_string(enum subfacet_path);

/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
    struct list list_node;      /* In struct facet's 'facets' list. */
    struct facet *facet;        /* Owning facet. */

    enum odp_key_fitness key_fitness;
    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    /* Datapath actions.
     *
     * These should be essentially identical for every subfacet in a facet,
     * but may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    enum slow_path_reason slow; /* 0 if fast path may be used. */
    enum subfacet_path path;    /* Installed in datapath? */

    /* Initial values of the packet that may be needed later. */
    struct initial_vals initial_vals;

    /* Datapath port the packet arrived on.  This is needed to remove
     * flows for ports that are no longer part of the bridge.  Since the
     * flow definition only has the OpenFlow port number and the port is
     * no longer part of the bridge, we can't determine the datapath port
     * number needed to delete the flow from the datapath. */
    uint32_t odp_in_port;
};

#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
                                        long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct ofproto_dpif *,
                                   struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct subfacet *,
                                  const struct ofpbuf *packet,
                                  struct ofpbuf *odp_actions);
static int subfacet_install(struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *, enum slow_path_reason);
static void subfacet_uninstall(struct subfacet *);

static enum subfacet_path subfacet_want_path(enum slow_path_reason);

/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owners. */
    struct hmap_node hmap_node;  /* In owning ofproto's 'facets' hmap. */
    struct list list_node;       /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;      /* Owning rule. */

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;           /* TCP flags seen for this 'rule'. */

    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool has_learn;              /* Actions include NXAST_LEARN? */
    bool has_normal;             /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;        /* Actions include NXAST_FIN_TIMEOUT? */
    tag_type tags;               /* Tags that would require revalidation. */
    mirror_mask_t mirrors;       /* Bitmap of dependent mirrors. */

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.  However, 'one_subfacet' may not
     * always be valid, since it could have been removed after newer
     * subfacets were pushed onto the 'subfacets' list.) */
    struct subfacet one_subfacet;

    long long int learn_rl;      /* Rate limiter for facet_learn(). */
};
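
/* Concrete example of the facet/subfacet split (illustrative): with VLAN
 * splinters enabled, the same OpenFlow-level flow can reach the datapath both
 * with and without an 802.1Q header, so a single facet may own two subfacets
 * whose keys differ only in the VLAN tag (see 'initial_vals' in struct
 * subfacet). */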

static struct facet *facet_create(struct rule_dpif *,
                                  const struct flow *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *,
                                const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *, uint32_t hash);
static void facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);

static struct subfacet *facet_get_subfacet(struct facet *);

static bool facet_is_controller_flow(struct facet *);

struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    uint32_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct tnl_port *tnl_port;  /* Tunnel handle, or null. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs
     * when VLAN devices are not used.  When broken device drivers are no
     * longer in widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;
    int vlandev_vid;
};

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
};

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;
    int vid;
};

static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);

static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                     uint16_t ofp_port);
static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                     uint32_t odp_port);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    ovs_assert(!ofport
               || ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */
struct table_dpif {
    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most common
     * kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
};

/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);
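
/* Example (illustrative): when a spanning tree port changes state, the code
 * that notices it records REV_STP in the backer's 'need_revalidate'; the next
 * type_run() then bumps the matching coverage counter (rev_stp) and
 * revalidates every facet that uses that backer. */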

/* Drop keys are odp flow keys which have drop flows installed in the kernel.
 * These are datapath flows which have no associated ofproto, if they did we
 * would use facets. */
struct drop_key {
    struct hmap_node hmap_node;
    struct nlattr *key;
    size_t key_len;
};

/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct timer next_expiration;
    struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */
    struct tag_set revalidate_set; /* Revalidate only matching facets. */

    struct hmap drop_keys;         /* Set of dropped odp keys. */
};
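
/* For example (illustrative): two bridges whose datapath type is "system"
 * share one dpif_backer, and thus one datapath named "ovs-system" (see
 * open_dpif_backer()); a "netdev" bridge would get a separate "ovs-netdev"
 * backer. */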

/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);

static void dpif_stats_update_hit_count(struct ofproto_dpif *ofproto,
                                        uint64_t delta);

struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */

    /* Statistics. */
    uint64_t n_matches;

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];
    bool has_mirrors;
    bool has_bonded_bundles;

    /* Facets. */
    struct hmap facets;
    struct hmap subfacets;
    struct governor *governor;
    long long int consistency_rl;

    /* Revalidation. */
    struct table_dpif tables[N_TABLES];

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in
                                 * userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

    /* Ports. */
    struct sset ports;           /* Set of standard port names. */
    struct sset ghost_ports;     /* Ports with no datapath port. */
    struct sset port_poll_set;   /* Queued names for port_poll() reply. */
    int port_poll_errno;         /* Last errno for port_poll() reply. */

    /* Per ofproto's dpif stats. */
    uint64_t n_hit;
    uint64_t n_missed;
};

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */
static bool clogged;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *,
                                        uint16_t ofp_port);
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
                                        uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *,
                          const struct initial_vals *, struct ds *);

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,
                                  struct ofbundle *);

/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct dpif_backer *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Utilities. */
static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
static size_t compose_sflow_action(const struct ofproto_dpif *,
                                   struct ofpbuf *odp_actions,
                                   const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of ports to bridges. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }
}

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    sset_clear(names);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}

/* Type functions. */

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}

static int
type_run(const char *type)
{
    struct dpif_backer *backer;
    char *devname;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    dpif_run(backer->dpif);

    if (backer->need_revalidate
        || !tag_set_is_empty(&backer->revalidate_set)) {
        struct tag_set revalidate_set = backer->revalidate_set;
        bool need_revalidate = backer->need_revalidate;
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                const char *dp_port;

                if (!iter->tnl_port) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        uint32_t odp_port = UINT32_MAX;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port, odp_port);
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? node->data : OVSP_NONE;
                if (tnl_port_reconfigure(&iter->up, iter->odp_port,
                                         &iter->tnl_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, node->data);
        }
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }

        if (backer->need_revalidate) {
            /* Clear the drop_keys in case we should now be accepting some
             * formerly dropped flows. */
            drop_key_clear(backer);
        }

        /* Clear the revalidation flags. */
        tag_set_init(&backer->revalidate_set);
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct facet *facet, *next;

            if (ofproto->backer != backer) {
                continue;
            }

            HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
                if (need_revalidate
                    || tag_set_intersects(&revalidate_set, facet->tags)) {
                    facet_revalidate(facet);
                }
            }
        }
    }

    if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);
        timer_set_duration(&backer->next_expiration, delay);
    }

    /* Check for port changes in the dpif. */
    while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
        struct ofproto_dpif *ofproto;
        struct dpif_port port;

        /* Don't report on the datapath's device. */
        if (!strcmp(devname, dpif_base_name(backer->dpif))) {
            goto next;
        }

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
                goto next;
            }
        }

        ofproto = lookup_ofproto_dpif_by_port_name(devname);
        if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
            /* The port was removed.  If we know the datapath,
             * report it through poll_set().  If we don't, it may be
             * notifying us of a removal we initiated, so ignore it.
             * If there's a pending ENOBUFS, let it stand, since
             * everything will be reevaluated. */
            if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
                sset_add(&ofproto->port_poll_set, devname);
                ofproto->port_poll_errno = 0;
            }
        } else if (!ofproto) {
            /* The port was added, but we don't know with which
             * ofproto we should associate it.  Delete it. */
            dpif_port_del(backer->dpif, port.port_no);
        }
        dpif_port_destroy(&port);

    next:
        free(devname);
    }

    if (error != EAGAIN) {
        struct ofproto_dpif *ofproto;

        /* There was some sort of error, so propagate it to all
         * ofprotos that use this backer. */
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                       &all_ofproto_dpifs) {
            if (ofproto->backer == backer) {
                sset_clear(&ofproto->port_poll_set);
                ofproto->port_poll_errno = error;
            }
        }
    }

    return 0;
}

static int
type_run_fast(const char *type)
{
    struct dpif_backer *backer;
    unsigned int work;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple
     * batches because in some cases handling a packet can cause another
     * packet to be queued almost immediately as part of the return flow.
     * Both optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    work = 0;
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(backer, FLOW_MISS_MAX_BATCH - work);
        if (retval <= 0) {
            return -retval;
        }
        work += retval;
    }

    return 0;
}
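
/* Example (illustrative): with FLOW_MISS_MAX_BATCH of 50, a single
 * type_run_fast() call handles at most 50 upcalls in total, spread across
 * however many smaller batches handle_upcalls() chooses to return. */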

static void
type_wait(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return;
    }

    timer_wait(&backer->next_expiration);
}

/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static void
close_dpif_backer(struct dpif_backer *backer)
{
    struct shash_node *node;

    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    drop_key_clear(backer);
    hmap_destroy(&backer->drop_keys);

    simap_destroy(&backer->tnl_backers);
    hmap_destroy(&backer->odp_to_ofport_map);
    node = shash_find(&all_dpif_backers, backer->type);
    free(backer->type);
    shash_delete(&all_dpif_backers, node);
    dpif_close(backer->dpif);

    free(backer);
}

/* Datapath port slated for removal from datapath. */
struct odp_garbage {
    struct list list_node;
    uint32_t odp_port;
};

static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
{
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct list garbage_list;
    struct odp_garbage *garbage, *next;
    struct sset names;
    char *backer_name;
    const char *name;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        backer->refcount++;
        *backerp = backer;
        return 0;
    }

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    sset_init(&names);
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {
            continue;
        }

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
        } else {
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
        }
    }
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
    free(backer_name);
    if (error) {
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 strerror(error));
        free(backer);
        return error;
    }

    backer->type = xstrdup(type);
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    hmap_init(&backer->drop_keys);
    timer_set_duration(&backer->next_expiration, 1000);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    tag_set_init(&backer->revalidate_set);
    *backerp = backer;

    dpif_flow_flush(backer->dpif);

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            list_push_front(&garbage_list, &garbage->list_node);
        }
    }
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);
        list_remove(&garbage->list_node);
        free(garbage);
    }

    shash_add(&all_dpif_backers, type, backer);

    error = dpif_recv_set(backer->dpif, true);
    if (error) {
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, strerror(error));
        close_dpif_backer(backer);
        return error;
    }

    return error;
}
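
/* Illustrative pairing: every successful open_dpif_backer() must eventually
 * be balanced by one close_dpif_backer().  The first open creates the backer
 * (and its "ovs-<type>" datapath); later opens just take a reference, and the
 * backer's maps and dpif handle are torn down only when the last reference is
 * released. */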

static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;
    int max_ports;
    int error;
    int i;

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
    if (error) {
        return error;
    }

    max_ports = dpif_get_max_ports(ofproto->backer->dpif);
    ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));

    ofproto->n_matches = 0;

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    }
    ofproto->has_bonded_bundles = false;

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);
    ofproto->governor = NULL;
    ofproto->consistency_rl = LLONG_MIN;

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    }

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_mirrors = false;
    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);
            }

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);
    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    ofproto->n_hit = 0;
    ofproto->n_missed = 0;

    return error;
}

static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct ofputil_flow_mod fm;
    int error;

    match_init_catchall(&fm.match);
    fm.priority = 0;
    match_set_reg(&fm.match, 0, id);
    fm.new_cookie = htonll(0);
    fm.cookie = htonll(0);
    fm.cookie_mask = htonll(0);
    fm.table_id = TBL_INTERNAL;
    fm.command = OFPFC_ADD;
    fm.idle_timeout = 0;
    fm.hard_timeout = 0;
    fm.buffer_id = 0;
    fm.out_port = 0;
    fm.flags = 0;
    fm.ofpacts = ofpacts->data;
    fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
                    id, ofperr_to_string(error));
        return error;
    }

    *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
    ovs_assert(*rulep != NULL);

    return 0;
}

static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    int error;
    int id;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    return error;
}
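
/* The internal flows set up above live in TBL_INTERNAL (table N_TABLES - 1),
 * which construct() marks hidden and read-only.  They are told apart by an
 * exact match on register 0: illustratively, reg0=1 is the miss rule (send to
 * controller) and reg0=2 is the no-packet-in rule (empty action list, i.e.
 * drop). */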

static void
complete_operations(struct ofproto_dpif *ofproto)
{
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);
        free(c);
    }
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;
    int i;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);
    }

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);
    governor_destroy(ofproto->governor);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    close_dpif_backer(ofproto->backer);
}

static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run_fast(ofport);
    }

    return 0;
}

static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (!clogged) {
        complete_operations(ofproto);
    }

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);

    /* Check the consistency of a random facet, to aid debugging. */
    if (time_msec() >= ofproto->consistency_rl
        && !hmap_is_empty(&ofproto->facets)
        && !ofproto->backer->need_revalidate) {
        struct facet *facet;

        ofproto->consistency_rl = time_msec() + 250;

        facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
                             struct facet, hmap_node);
        if (!tag_set_intersects(&ofproto->backer->revalidate_set,
                                facet->tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->backer->need_revalidate = REV_INCONSISTENCY;
            }
        }
    }

    if (ofproto->governor) {
        size_t n_subfacets;

        governor_run(ofproto->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
            && governor_is_idle(ofproto->governor)) {
            governor_destroy(ofproto->governor);
            ofproto->governor = NULL;
        }
    }

    return 0;
}
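
/* Note on the governor hysteresis above (illustrative): the governor is
 * created elsewhere once the subfacet population grows large; run() only
 * destroys it after the count has fallen below a quarter of
 * 'flow_eviction_threshold' and the governor reports itself idle, so it does
 * not flap on workloads that hover near the creation threshold. */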

static void
wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();
    }

    dpif_wait(ofproto->backer->dpif);
    dpif_recv_wait(ofproto->backer->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
        poll_immediate_wake();
    }
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_wait(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_wait(bundle);
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    mac_learning_wait(ofproto->ml);
    stp_wait(ofproto);
    if (ofproto->backer->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    }
    if (ofproto->governor) {
        governor_wait(ofproto->governor);
    }
}
1558
0d085684
BP
1559static void
1560get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
1561{
1562 const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1563
1564 simap_increase(usage, "facets", hmap_count(&ofproto->facets));
1565 simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets));
1566}
1567
abe529af
BP
1568static void
1569flush(struct ofproto *ofproto_)
1570{
1571 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
acf60855
JP
1572 struct subfacet *subfacet, *next_subfacet;
1573 struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
1574 int n_batch;
b0f7b9b5 1575
acf60855
JP
1576 n_batch = 0;
1577 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
1578 &ofproto->subfacets) {
1579 if (subfacet->path != SF_NOT_INSTALLED) {
1580 batch[n_batch++] = subfacet;
1581 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
1582 subfacet_destroy_batch(ofproto, batch, n_batch);
1583 n_batch = 0;
1584 }
1585 } else {
1586 subfacet_destroy(subfacet);
b0f7b9b5 1587 }
abe529af 1588 }
acf60855
JP
1589
1590 if (n_batch > 0) {
1591 subfacet_destroy_batch(ofproto, batch, n_batch);
1592 }
abe529af
BP
1593}
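
/* Destroying installed subfacets in batches of SUBFACET_DESTROY_MAX_BATCH,
 * as flush() does above, lets subfacet_destroy_batch() hand the datapath a
 * group of flow deletions at once instead of paying one dpif round trip per
 * flow.  Subfacets with path == SF_NOT_INSTALLED have no datapath flow to
 * delete, so they are torn down directly with subfacet_destroy(). */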

static void
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
{
    *arp_match_ip = true;
    *actions = (OFPUTIL_A_OUTPUT |
                OFPUTIL_A_SET_VLAN_VID |
                OFPUTIL_A_SET_VLAN_PCP |
                OFPUTIL_A_STRIP_VLAN |
                OFPUTIL_A_SET_DL_SRC |
                OFPUTIL_A_SET_DL_DST |
                OFPUTIL_A_SET_NW_SRC |
                OFPUTIL_A_SET_NW_DST |
                OFPUTIL_A_SET_NW_TOS |
                OFPUTIL_A_SET_TP_SRC |
                OFPUTIL_A_SET_TP_DST |
                OFPUTIL_A_ENQUEUE);
}

static void
get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->backer->dpif, &s);

    ots->lookup_count = htonll(s.n_hit + s.n_missed);
    ots->matched_count = htonll(s.n_hit + ofproto->n_matches);
}
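
/* The table stats above are derived from datapath counters: every datapath
 * lookup is either a hit or a miss, so lookup_count is their sum, and
 * matched_count adds ofproto->n_matches (packets that missed in the
 * datapath but matched an OpenFlow rule during upcall processing) to the
 * datapath hits. */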

static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xmalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}

static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const struct netdev *netdev = port->up.netdev;
    struct dpif_port dpif_port;
    int error;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    port->bundle = NULL;
    port->cfm = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    port->tnl_port = NULL;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;
    port->carrier_seq = netdev_get_carrier_resets(netdev);

    if (netdev_vport_is_patch(netdev)) {
        /* XXX By bailing out here, we don't do required sFlow work. */
        port->odp_port = OVSP_NONE;
        return 0;
    }

    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    netdev_vport_get_dpif_port(netdev),
                                    &dpif_port);
    if (error) {
        return error;
    }

    port->odp_port = dpif_port.port_no;

    if (netdev_get_tunnel_config(netdev)) {
        port->tnl_port = tnl_port_add(&port->up, port->odp_port);
    } else {
        /* Sanity-check that a mapping doesn't already exist.  This
         * shouldn't happen for non-tunnel ports. */
        if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
            VLOG_ERR("port %s already has an OpenFlow port number",
                     dpif_port.name);
            dpif_port_destroy(&dpif_port);
            return EBUSY;
        }

        hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
                    hash_int(port->odp_port, 0));
    }
    dpif_port_destroy(&dpif_port);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
    }

    return 0;
}

static void
port_destruct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
    const char *devname = netdev_get_name(port->up.netdev);

    if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        /* The underlying device is still there, so delete it.  This
         * happens when the ofproto is being destroyed, since the caller
         * assumes that removal of attached ports will happen as part of
         * destruction. */
        if (!port->tnl_port) {
            dpif_port_del(ofproto->backer->dpif, port->odp_port);
        }
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (port->odp_port != OVSP_NONE && !port->tnl_port) {
        hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
    }

    tnl_port_del(port->tnl_port);
    sset_find_and_delete(&ofproto->ports, devname);
    sset_find_and_delete(&ofproto->ghost_ports, devname);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);
}

static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
    }
}

static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}

static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create();
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
            }
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
        dpif_sflow_set_options(ds, sflow_options);
    } else {
        if (ds) {
            dpif_sflow_destroy(ds);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}

static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    if (!s) {
        error = 0;
    } else {
        if (!ofport->cfm) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
        }

        if (cfm_configure(ofport->cfm, s)) {
            return 0;
        }

        error = EINVAL;
    }
    cfm_destroy(ofport->cfm);
    ofport->cfm = NULL;
    return error;
}

static bool
get_cfm_status(const struct ofport *ofport_,
               struct ofproto_cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (ofport->cfm) {
        status->faults = cfm_get_fault(ofport->cfm);
        status->remote_opstate = cfm_get_opup(ofport->cfm);
        status->health = cfm_get_health(ofport->cfm);
        cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
        return true;
    } else {
        return false;
    }
}
\f
/* Spanning Tree. */

static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            send_packet(ofport, pkt);
        }
    }
    ofpbuf_delete(pkt);
}
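
/* send_bpdu_cb() is handed to stp_create() in set_stp() below, so BPDU
 * transmission is driven by the STP library's state machine; this module
 * only fills in the source MAC address and puts the frame on the wire. */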

/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();
        }

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
    } else {
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);
        }

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
    }

    return 0;
}

static int
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->stp) {
        s->enabled = true;
        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
    } else {
        s->enabled = false;
    }

    return 0;
}

static void
update_stp_port_state(struct ofport_dpif *ofport)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
                             : STP_DISABLED;

    /* Update state. */
    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;
        bool fwd_change;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml,
                               &ofproto->backer->revalidate_set);
        }
        fwd_change = stp_forward_in_state(ofport->stp_state)
            != stp_forward_in_state(state);

        ofproto->backer->need_revalidate = REV_STP;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);
        }

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
                     : 0);
        ofproto_port_set_state(&ofport->up, of_state);
    }
}
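
/* A worked example of the transition handling above: when a port moves from
 * STP_LEARNING to STP_FORWARDING, stp_forward_in_state() flips from false
 * to true, so fwd_change is set, bundle_update() recomputes whether the
 * bundle is floodable, and the advertised OpenFlow port state changes from
 * OFPUTIL_PS_STP_LEARN to OFPUTIL_PS_STP_FORWARD. */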

/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
static int
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
        if (sp) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
        }
        return 0;
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);
    }

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

    return 0;
}

static int
get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {
        s->enabled = false;
        return 0;
    }

    s->enabled = true;
    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);

    return 0;
}

static void
stp_run(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        if (elapsed > 0) {
            stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
            ofproto->stp_last_tick = now;
        }
        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

            if (ofport) {
                update_stp_port_state(ofport);
            }
        }

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
        }
    }
}

static void
stp_wait(struct ofproto_dpif *ofproto)
{
    if (ofproto->stp) {
        poll_timer_wait(1000);
    }
}

/* Returns true if STP should process 'flow'. */
static bool
stp_should_process_flow(const struct flow *flow)
{
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}

static void
stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
{
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;

    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {
        return;
    }

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;
    }

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
    }
}
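
/* A note on the trimming in stp_process_packet(): BPDUs are 802.3 frames,
 * so eth_type holds the payload length rather than an Ethertype.  That is
 * what makes it safe to use it for stripping trailing padding before the
 * Ethernet and LLC headers are pulled off and the raw BPDU is handed to
 * stp_received_bpdu(). */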
\f
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
    struct priority_to_dscp *pdscp;
    uint32_t hash;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {
            return pdscp;
        }
    }
    return NULL;
}

static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        free(pdscp);
    }
}

static int
set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
           size_t n_qdscp)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);
    size_t i;

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;
        uint32_t priority;
        uint8_t dscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
                                   &priority)) {
            continue;
        }

        pdscp = get_priority(ofport, priority);
        if (pdscp) {
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
        } else {
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        if (pdscp->dscp != dscp) {
            pdscp->dscp = dscp;
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
    }

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    hmap_swap(&new, &ofport->priorities);
    hmap_destroy(&new);

    return 0;
}
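
/* A worked example of the DSCP computation in set_queues(): a configured
 * DSCP of 46 (Expedited Forwarding) becomes (46 << 2) & IP_DSCP_MASK ==
 * 0xb8, i.e. the six DSCP bits shifted into the top of the IP TOS byte.
 * Each surviving mapping thus pairs a datapath skb priority (obtained from
 * dpif_queue_to_priority()) with the DSCP value that flow translation can
 * apply to IP packets directed to that queue. */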
\f
/* Bundles. */

/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
static void
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
            if (all_ofprotos) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                    if (o != ofproto) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                                                NULL);
                        if (e) {
                            mac_learning_expire(o->ml, e);
                        }
                    }
                }
            }

            mac_learning_expire(ml, mac);
        }
    }
}

static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {
            return bundle;
        }
    }
    return NULL;
}

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
static void
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
{
    size_t i;

    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
        if (bundle) {
            hmapx_add(bundles, bundle);
        }
    }
}

static void
bundle_update(struct ofbundle *bundle)
{
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
            break;
        }
    }
}

static void
bundle_del_port(struct ofport_dpif *port)
{
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

    if (bundle->lacp) {
        lacp_slave_unregister(bundle->lacp, port);
    }
    if (bundle->bond) {
        bond_slave_unregister(bundle->bond, port);
    }

    bundle_update(bundle);
}

static bool
bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp)
{
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);
    if (!port) {
        return false;
    }

    if (port->bundle != bundle) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        if (port->bundle) {
            bundle_del_port(port);
        }

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;
        }
    }
    if (lacp) {
        bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
        lacp_slave_register(bundle->lacp, port, lacp);
    }

    return true;
}

static void
bundle_destroy(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;
    int i;

    if (!bundle) {
        return;
    }

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];
        if (m) {
            if (m->out == bundle) {
                mirror_destroy(m);
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        }
    }

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);
    }

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->name);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
    free(bundle);
}

static int
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;
    int vlan;
    size_t i;
    bool ok;

    if (!s) {
        bundle_destroy(bundle_lookup(ofproto, aux));
        return 0;
    }

    ovs_assert(s->n_slaves == 1 || s->bond != NULL);
    ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
    if (!bundle) {
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->aux = aux;
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->vlan = -1;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;
    }

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        free(bundle->name);
        bundle->name = xstrdup(s->name);
    }

    /* LACP. */
    if (s->lacp) {
        if (!bundle->lacp) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            bundle->lacp = lacp_create();
        }
        lacp_configure(bundle->lacp, s->lacp);
    } else {
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;
    }

    /* Update set of ports. */
    ok = true;
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL)) {
            ok = false;
        }
    }
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {
                    goto found;
                }
            }

            bundle_del_port(port);
        found: ;
        }
    }
    ovs_assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);
        return EINVAL;
    }

    /* Set VLAN tagging mode */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;
        need_flush = true;
    }

    /* Set VLAN tag. */
    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
            : 0);
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;
        need_flush = true;
    }

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:
        trunks = NULL;
        break;

    case PORT_VLAN_TRUNK:
        trunks = CONST_CAST(unsigned long *, s->trunks);
        break;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
            if (s->trunks) {
                trunks = bitmap_clone(s->trunks, 4096);
            } else {
                trunks = bitmap_allocate1(4096);
            }
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
        } else {
            trunks = CONST_CAST(unsigned long *, s->trunks);
        }
        break;

    default:
        NOT_REACHED();
    }
    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
        } else {
            bundle->trunks = trunks;
            trunks = NULL;
        }
        need_flush = true;
    }
    if (trunks != s->trunks) {
        free(trunks);
    }

    /* Bonding. */
    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
        if (bundle->bond) {
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->backer->need_revalidate = REV_RECONFIGURE;
            }
        } else {
            bundle->bond = bond_create(s->bond);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->up.netdev);
        }
    } else {
        bond_destroy(bundle->bond);
        bundle->bond = NULL;
    }

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
    if (need_flush) {
        bundle_flush_macs(bundle, false);
    }

    return 0;
}
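
/* An example of the native-VLAN handling in bundle_set(): with vlan_mode
 * PORT_VLAN_NATIVE_TAGGED, vlan 10, and trunks {20, 30}, the configured
 * bitmap is cloned, bit 10 is forced on, and bit 0 is forced off, so the
 * bundle effectively trunks {10, 20, 30}.  Ownership then follows the
 * pointers: a freshly built bitmap is handed to the bundle as-is, while a
 * bitmap still equal to 's->trunks' is duplicated with vlan_bitmap_clone()
 * so that the caller's memory is never retained or freed here. */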

static void
bundle_remove(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

    if (bundle) {
        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;
        }
    }
}

static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];
    int error;

    error = netdev_get_etheraddr(port->up.netdev, ea);
    if (!error) {
        struct ofpbuf packet;
        void *packet_pdu;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
                                 pdu_size);
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
    } else {
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));
    }
}

static void
bundle_send_learning_packets(struct ofbundle *bundle)
{
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;
            void *port_void;
            int ret;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
                                                           e->mac, e->vlan,
                                                           &port_void);
            port = port_void;
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);
            if (ret) {
                error = ret;
                n_errors++;
            }
            n_packets++;
        }
    }

    if (n_errors) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
    } else {
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
    }
}

static void
bundle_run(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_run(bundle->lacp, send_pdu_cb);
    }
    if (bundle->bond) {
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
        }

        bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);
        }
    }
}

static void
bundle_wait(struct ofbundle *bundle)
{
    if (bundle->lacp) {
        lacp_wait(bundle->lacp);
    }
    if (bundle->bond) {
        bond_wait(bundle->bond);
    }
}
\f
/* Mirrors. */

static int
mirror_scan(struct ofproto_dpif *ofproto)
{
    int idx;

    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {
            return idx;
        }
    }
    return -1;
}

static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {
            return mirror;
        }
    }

    return NULL;
}

/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
static void
mirror_update_dups(struct ofproto_dpif *ofproto)
{
    int i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

        if (m) {
            m->dup_mirrors = MIRROR_MASK_C(1) << i;
        }
    }

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];
        int j;

        if (!m1) {
            continue;
        }

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;
            }
        }
    }
}
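
/* A worked example for mirror_update_dups(): if mirrors 0 and 2 share an
 * output (same 'out' bundle and 'out_vlan'), both end up with dup_mirrors
 * == 0b101.  During flow translation this lets a single mirrored copy of a
 * packet satisfy every mirror in the set, rather than duplicating it once
 * per mirror. */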

static int
mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    int out_vlan;

    mirror = mirror_lookup(ofproto, aux);
    if (!s) {
        mirror_destroy(mirror);
        return 0;
    }
    if (!mirror) {
        int idx;

        idx = mirror_scan(ofproto);
        if (idx < 0) {
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      "cannot create %s",
                      ofproto->up.name, MAX_MIRRORS, s->name);
            return EFBIG;
        }

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->idx = idx;
        mirror->aux = aux;
        mirror->out_vlan = -1;
        mirror->name = NULL;
    }

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        free(mirror->name);
        mirror->name = xstrdup(s->name);
    }

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
        if (!out) {
            mirror_destroy(mirror);
            return EINVAL;
        }
        out_vlan = -1;
    } else {
        out = NULL;
        out_vlan = s->out_vlan;
    }
    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
    {
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);
        return 0;
    }

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out = out;
    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
        } else {
            bundle->src_mirrors &= ~mirror_bit;
        }

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
        } else {
            bundle->dst_mirrors &= ~mirror_bit;
        }

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
        } else {
            bundle->mirror_out &= ~mirror_bit;
        }
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml,
                       &ofproto->backer->revalidate_set);
    mirror_update_dups(ofproto);

    return 0;
}

static void
mirror_destroy(struct ofmirror *mirror)
{
    struct ofproto_dpif *ofproto;
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    int i;

    if (!mirror) {
        return;
    }

    ofproto = mirror->ofproto;
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);

    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle->src_mirrors &= ~mirror_bit;
        bundle->dst_mirrors &= ~mirror_bit;
        bundle->mirror_out &= ~mirror_bit;
    }

    hmapx_destroy(&mirror->srcs);
    hmapx_destroy(&mirror->dsts);
    free(mirror->vlans);

    ofproto->mirrors[mirror->idx] = NULL;
    free(mirror->name);
    free(mirror);

    mirror_update_dups(ofproto);

    ofproto->has_mirrors = false;
    for (i = 0; i < MAX_MIRRORS; i++) {
        if (ofproto->mirrors[i]) {
            ofproto->has_mirrors = true;
            break;
        }
    }
}

static int
mirror_get_stats(struct ofproto *ofproto_, void *aux,
                 uint64_t *packets, uint64_t *bytes)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofmirror *mirror = mirror_lookup(ofproto, aux);

    if (!mirror) {
        *packets = *bytes = UINT64_MAX;
        return 0;
    }

    *packets = mirror->packet_count;
    *bytes = mirror->byte_count;

    return 0;
}

static int
set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
        mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
    }
    return 0;
}

static bool
is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofbundle *bundle = bundle_lookup(ofproto, aux);
    return bundle && bundle->mirror_out != 0;
}

static void
forward_bpdu_changed(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
}

static void
set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
                     size_t max_entries)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mac_learning_set_idle_time(ofproto->ml, idle_time);
    mac_learning_set_max_entries(ofproto->ml, max_entries);
}
\f
/* Ports. */

static struct ofport_dpif *
get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
    return ofport ? ofport_dpif_cast(ofport) : NULL;
}

static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
    struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
    return port && &ofproto->up == port->up.ofproto ? port : NULL;
}

static void
ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
                            struct ofproto_port *ofproto_port,
                            struct dpif_port *dpif_port)
{
    ofproto_port->name = dpif_port->name;
    ofproto_port->type = dpif_port->type;
    ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}

static struct ofport_dpif *
ofport_get_peer(const struct ofport_dpif *ofport_dpif)
{
    const struct ofproto_dpif *ofproto;
    const char *peer;

    peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
    if (!peer) {
        return NULL;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        struct ofport *ofport;

        ofport = shash_find_data(&ofproto->up.port_by_name, peer);
        if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
            return ofport_dpif_cast(ofport);
        }
    }
    return NULL;
}

static void
port_run_fast(struct ofport_dpif *ofport)
{
    if (ofport->cfm && cfm_should_send_ccm(ofport->cfm)) {
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
        send_packet(ofport, &packet);
        ofpbuf_uninit(&packet);
    }
}

static void
port_run(struct ofport_dpif *ofport)
{
    long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
    bool carrier_changed = carrier_seq != ofport->carrier_seq;
    bool enable = netdev_get_carrier(ofport->up.netdev);

    ofport->carrier_seq = carrier_seq;

    port_run_fast(ofport);

    if (ofport->tnl_port
        && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
                                &ofport->tnl_port)) {
        ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
    }

    if (ofport->cfm) {
        int cfm_opup = cfm_get_opup(ofport->cfm);

        cfm_run(ofport->cfm);
        enable = enable && !cfm_get_fault(ofport->cfm);

        if (cfm_opup >= 0) {
            enable = enable && cfm_opup;
        }
    }

    if (ofport->bundle) {
        enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
        if (carrier_changed) {
            lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
        }
    }

    if (ofport->may_enable != enable) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        if (ofproto->has_bundle_action) {
            ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
        }
    }

    ofport->may_enable = enable;
}

static void
port_wait(struct ofport_dpif *ofport)
{
    if (ofport->cfm) {
        cfm_wait(ofport->cfm);
    }
}

static int
port_query_by_name(const struct ofproto *ofproto_, const char *devname,
                   struct ofproto_port *ofproto_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_port dpif_port;
    int error;

    if (sset_contains(&ofproto->ghost_ports, devname)) {
        const char *type = netdev_get_type_from_name(devname);

        /* We may be called before ofproto->up.port_by_name is populated with
         * the appropriate ofport.  For this reason, we must get the name and
         * type from the netdev layer directly. */
        if (type) {
            const struct ofport *ofport;

            ofport = shash_find_data(&ofproto->up.port_by_name, devname);
            ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
            ofproto_port->name = xstrdup(devname);
            ofproto_port->type = xstrdup(type);
            return 0;
        }
        return ENODEV;
    }

    if (!sset_contains(&ofproto->ports, devname)) {
        return ENODEV;
    }
    error = dpif_port_query_by_name(ofproto->backer->dpif,
                                    devname, &dpif_port);
    if (!error) {
        ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
    }
    return error;
}

static int
port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
    const char *devname = netdev_get_name(netdev);

    if (netdev_vport_is_patch(netdev)) {
        sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
        return 0;
    }

    if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        uint32_t port_no = UINT32_MAX;
        int error;

        error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
        if (error) {
            return error;
        }
        if (netdev_get_tunnel_config(netdev)) {
            simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
        }
    }

    if (netdev_get_tunnel_config(netdev)) {
        sset_add(&ofproto->ghost_ports, devname);
    } else {
        sset_add(&ofproto->ports, devname);
    }
    return 0;
}

static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    int error = 0;

    if (!ofport) {
        return 0;
    }

    sset_find_and_delete(&ofproto->ghost_ports,
                         netdev_get_name(ofport->up.netdev));
    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    if (!ofport->tnl_port) {
        error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
        if (!error) {
            /* The caller is going to close ofport->up.netdev.  If this is a
             * bonded port, then the bond is using that netdev, so remove it
             * from the bond.  The client will need to reconfigure everything
             * after deleting ports, so then the slave will get re-added. */
            bundle_remove(&ofport->up);
        }
    }
    return error;
}

static int
port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error;

    error = netdev_get_stats(ofport->up.netdev, stats);

    if (!error && ofport_->ofp_port == OFPP_LOCAL) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);

        /* ofproto->stats.tx_packets represents packets that we created
         * internally and sent to some port (e.g. packets sent with
         * send_packet()).  Account for them as if they had come from
         * OFPP_LOCAL and got forwarded. */

        if (stats->rx_packets != UINT64_MAX) {
            stats->rx_packets += ofproto->stats.tx_packets;
        }

        if (stats->rx_bytes != UINT64_MAX) {
            stats->rx_bytes += ofproto->stats.tx_bytes;
        }

        /* ofproto->stats.rx_packets represents packets that were received on
         * some port and we processed internally and dropped (e.g. STP).
         * Account for them as if they had been forwarded to OFPP_LOCAL. */

        if (stats->tx_packets != UINT64_MAX) {
            stats->tx_packets += ofproto->stats.rx_packets;
        }

        if (stats->tx_bytes != UINT64_MAX) {
            stats->tx_bytes += ofproto->stats.rx_bytes;
        }
    }

    return error;
}
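
/* The adjustments above keep OFPP_LOCAL's counters honest: packets that
 * ofproto-dpif generates itself (e.g. via send_packet()) are folded into
 * the local port's rx side, as if they had come from OFPP_LOCAL and been
 * forwarded, and packets received elsewhere but consumed internally (e.g.
 * STP BPDUs) are folded into its tx side.  Both adjustments skip counters
 * that the netdev reported as unavailable (UINT64_MAX). */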

/* Account packets for LOCAL port. */
static void
ofproto_update_local_port_stats(const struct ofproto *ofproto_,
                                size_t tx_size, size_t rx_size)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (rx_size) {
        ofproto->stats.rx_packets++;
        ofproto->stats.rx_bytes += rx_size;
    }
    if (tx_size) {
        ofproto->stats.tx_packets++;
        ofproto->stats.tx_bytes += tx_size;
    }
}

struct port_dump_state {
    uint32_t bucket;
    uint32_t offset;
    bool ghost;

    struct ofproto_port port;
    bool has_port;
};

static int
port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct port_dump_state));
    return 0;
}

static int
port_dump_next(const struct ofproto *ofproto_, void *state_,
               struct ofproto_port *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct port_dump_state *state = state_;
    const struct sset *sset;
    struct sset_node *node;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
        state->has_port = false;
    }
    sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
    while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
        int error;

        error = port_query_by_name(ofproto_, node->name, &state->port);
        if (!error) {
            *port = state->port;
            state->has_port = true;
            return 0;
        } else if (error != ENODEV) {
            return error;
        }
    }

    if (!state->ghost) {
        state->ghost = true;
        state->bucket = 0;
        state->offset = 0;
        return port_dump_next(ofproto_, state_, port);
    }

    return EOF;
}
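
/* port_dump_next() thus walks two string sets in sequence: first the
 * datapath-backed ports in 'ofproto->ports', then the "ghost" ports (patch
 * ports and tunnels) that have no datapath port behind them.  The 'ghost'
 * flag in port_dump_state is the only cursor needed to switch passes; the
 * bucket/offset pair is simply reset before the second walk. */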

static int
port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
{
    struct port_dump_state *state = state_;

    if (state->has_port) {
        ofproto_port_destroy(&state->port);
    }
    free(state);
    return 0;
}

static int
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto->port_poll_errno) {
        int error = ofproto->port_poll_errno;
        ofproto->port_poll_errno = 0;
        return error;
    }

    if (sset_is_empty(&ofproto->port_poll_set)) {
        return EAGAIN;
    }

    *devnamep = sset_pop(&ofproto->port_poll_set);
    return 0;
}

static void
port_poll_wait(const struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    dpif_port_poll_wait(ofproto->backer->dpif);
}

static int
port_is_lacp_current(const struct ofport *ofport_)
{
    const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    return (ofport->bundle && ofport->bundle->lacp
            ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
            : -1);
}
\f
/* Upcall handling. */

/* Flow miss batching.
 *
 * Some dpifs implement operations faster when you hand them off in a batch.
 * To allow batching, "struct flow_miss" queues the dpif-related work needed
 * for a given flow.  Each "struct flow_miss" corresponds to sending one or
 * more packets, plus possibly installing the flow in the dpif.
 *
 * So far we only batch the operations that affect flow setup time the most.
 * It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
    struct hmap_node hmap_node;
    struct ofproto_dpif *ofproto;
    struct flow flow;
    enum odp_key_fitness key_fitness;
    const struct nlattr *key;
    size_t key_len;
    struct initial_vals initial_vals;
    struct list packets;
    enum dpif_upcall_type upcall_type;
    uint32_t odp_in_port;
};

struct flow_miss_op {
    struct dpif_op dpif_op;
    void *garbage;              /* Pointer to pass to free(), NULL if none. */
    uint64_t stub[1024 / 8];    /* Temporary buffer. */
};
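
/* A sketch of how these two structures are consumed (the actual driver,
 * handle_miss_upcalls(), appears later in this file): misses are collected
 * into an hmap keyed by flow, each miss contributes zero or more
 * flow_miss_ops, and the accumulated 'dpif_op' pointers are then submitted
 * to the datapath in a single dpif_operate() call, which is where the
 * batching described above pays off. */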

/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
 * OpenFlow controller as necessary according to their individual
 * configurations. */
static void
send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
                    const struct flow *flow)
{
    struct ofputil_packet_in pin;

    pin.packet = packet->data;
    pin.packet_len = packet->size;
    pin.reason = OFPR_NO_MATCH;
    pin.controller_id = 0;

    pin.table_id = 0;
    pin.cookie = 0;

    pin.send_len = 0;           /* not used for flow table misses */

    flow_get_metadata(flow, &pin.fmd);

    connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}

static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
                const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
    if (!ofport) {
        return 0;
    } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
        if (packet) {
            cfm_process_heartbeat(ofport->cfm, packet);
        }
        return SLOW_CFM;
    } else if (ofport->bundle && ofport->bundle->lacp
               && flow->dl_type == htons(ETH_TYPE_LACP)) {
        if (packet) {
            lacp_process_packet(ofport->bundle->lacp, ofport, packet);
        }
        return SLOW_LACP;
    } else if (ofproto->stp && stp_should_process_flow(flow)) {
        if (packet) {
            stp_process_packet(ofport, packet);
        }
        return SLOW_STP;
    } else {
        return 0;
    }
}
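
/* process_special() is what forces CFM, LACP, and STP control traffic onto
 * the slow path: a nonzero SLOW_* return causes the resulting datapath flow
 * to keep sending these packets to userspace, where the protocol state
 * machines above can consume them, instead of being forwarded purely in the
 * fast path. */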

static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}

/* Partially initializes 'op' as an "execute" operation for 'miss' and
 * 'packet'.  The caller must initialize op->actions and op->actions_len.  If
 * 'miss' is associated with a subfacet the caller must also initialize the
 * returned op->subfacet, and if anything needs to be freed after processing
 * the op, the caller must initialize op->garbage also. */
static void
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
                          struct flow_miss_op *op)
{
    if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) {
        /* This packet was received on a VLAN splinter port.  We
         * added a VLAN to the packet to make the packet resemble
         * the flow, but the actions were composed assuming that
         * the packet contained no VLAN.  So, we must remove the
         * VLAN header from the packet before trying to execute the
         * actions. */
        eth_pop_vlan(packet);
    }

    op->garbage = NULL;
    op->dpif_op.type = DPIF_OP_EXECUTE;
    op->dpif_op.u.execute.key = miss->key;
    op->dpif_op.u.execute.key_len = miss->key_len;
    op->dpif_op.u.execute.packet = packet;
}

/* Helper for handle_flow_miss_without_facet() and
 * handle_flow_miss_with_facet(). */
static void
handle_flow_miss_common(struct rule_dpif *rule,
                        struct ofpbuf *packet, const struct flow *flow)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);

    ofproto->n_matches++;

    if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
        /*
         * Extra-special case for fail-open mode.
         *
         * We are in fail-open mode and the packet matched the fail-open
         * rule, but we are connected to a controller too.  We should send
         * the packet up to the controller in the hope that it will try to
         * set up a flow and thereby allow us to exit fail-open.
         *
         * See the top-level comment in fail-open.c for more information.
         */
        send_packet_in_miss(ofproto, packet, flow);
    }
}

/* Figures out whether a flow that missed in 'ofproto', whose details are in
 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
 * installing a datapath flow.  The answer is usually "yes" (a return value of
 * true).  However, for short flows the cost of bookkeeping is much higher than
 * the benefits, so when the datapath holds a large number of flows we impose
 * some heuristics to decide which flows are likely to be worth tracking. */
static bool
flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
                            struct flow_miss *miss, uint32_t hash)
{
    if (!ofproto->governor) {
        size_t n_subfacets;

        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
            return true;
        }

        ofproto->governor = governor_create(ofproto->up.name);
    }

    return governor_should_install_flow(ofproto->governor, hash,
                                        list_size(&miss->packets));
}
3458
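/* flow_miss_should_make_facet() is an instance of a two-regime caching
 * policy: while the flow table is comfortably under the eviction threshold,
 * every flow is cached; past it, a selective policy (the governor) decides.
 * A standalone sketch of the shape of that decision; "looks_heavy" is a
 * purely illustrative stand-in for the governor, whose real logic lives in
 * ofproto-dpif-governor.c: */
#include <stdbool.h>

static bool
looks_heavy(unsigned int n_packets)     /* Hypothetical governor stand-in. */
{
    return n_packets > 1;               /* e.g. multi-packet bursts only. */
}

static bool
should_cache(unsigned int table_size, unsigned int threshold,
             unsigned int n_packets)
{
    if (table_size * 2 <= threshold) {  /* Small table: always cache. */
        return true;
    }
    return looks_heavy(n_packets);      /* Large table: be selective. */
}

int
main(void)
{
    return !(should_cache(10, 1000, 1)          /* small table: cached */
             && !should_cache(5000, 1000, 1));  /* large table, light flow */
}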
3459/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
3460 * or creating any datapath flow. May add an "execute" operation to 'ops' and
3461 * increment '*n_ops'. */
3462static void
3463handle_flow_miss_without_facet(struct flow_miss *miss,
3464 struct rule_dpif *rule,
3465 struct flow_miss_op *ops, size_t *n_ops)
3466{
3467 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
a7752d4a 3468 long long int now = time_msec();
9d6ac44e 3469 struct action_xlate_ctx ctx;
530a1d91 3470 struct ofpbuf *packet;
2b459b83 3471
9d6ac44e
BP
3472 LIST_FOR_EACH (packet, list_node, &miss->packets) {
3473 struct flow_miss_op *op = &ops[*n_ops];
3474 struct dpif_flow_stats stats;
3475 struct ofpbuf odp_actions;
abe529af 3476
9d6ac44e 3477 COVERAGE_INC(facet_suppress);
501f8d1f 3478
9d6ac44e 3479 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
501f8d1f 3480
a7752d4a 3481 dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
9d6ac44e 3482 rule_credit_stats(rule, &stats);
abe529af 3483
14f94f9a
JP
3484 action_xlate_ctx_init(&ctx, ofproto, &miss->flow,
3485 &miss->initial_vals, rule, 0, packet);
9d6ac44e 3486 ctx.resubmit_stats = &stats;
f25d0cf3 3487 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
9d6ac44e 3488 &odp_actions);
abe529af 3489
9d6ac44e
BP
3490 if (odp_actions.size) {
3491 struct dpif_execute *execute = &op->dpif_op.u.execute;
3492
3493 init_flow_miss_execute_op(miss, packet, op);
3494 execute->actions = odp_actions.data;
3495 execute->actions_len = odp_actions.size;
3496 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
3497
3498 (*n_ops)++;
3499 } else {
3500 ofpbuf_uninit(&odp_actions);
3501 }
abe529af 3502 }
9d6ac44e
BP
3503}
3504
3505/* Handles 'miss', which matches 'facet'. May add any required datapath
459b16a1
BP
3506 * operations to 'ops', incrementing '*n_ops' for each new op.
3507 *
3508 * All of the packets in 'miss' are considered to have arrived at time 'now'.
3509 * This is really important only for new facets: if we just called time_msec()
3510 * here, then the new subfacet or its packets could look (occasionally) as
3511 * though it was used some time after the facet was used. That can make a
3512 * one-packet flow look like it has a nonzero duration, which looks odd in
3513 * e.g. NetFlow statistics. */
9d6ac44e
BP
3514static void
3515handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
459b16a1 3516 long long int now,
9d6ac44e
BP
3517 struct flow_miss_op *ops, size_t *n_ops)
3518{
6a7e895f
BP
3519 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3520 enum subfacet_path want_path;
9d6ac44e
BP
3521 struct subfacet *subfacet;
3522 struct ofpbuf *packet;
abe529af 3523
a088a1ff 3524 subfacet = subfacet_create(facet, miss, now);
b0f7b9b5 3525
530a1d91 3526 LIST_FOR_EACH (packet, list_node, &miss->packets) {
5fe20d5d 3527 struct flow_miss_op *op = &ops[*n_ops];
67d91f78 3528 struct dpif_flow_stats stats;
5fe20d5d 3529 struct ofpbuf odp_actions;
67d91f78 3530
9d6ac44e 3531 handle_flow_miss_common(facet->rule, packet, &miss->flow);
501f8d1f 3532
5fe20d5d 3533 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
6a7e895f 3534 if (!subfacet->actions || subfacet->slow) {
5fe20d5d 3535 subfacet_make_actions(subfacet, packet, &odp_actions);
501f8d1f 3536 }
67d91f78 3537
459b16a1 3538 dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
15baa734 3539 subfacet_update_stats(subfacet, &stats);
67d91f78 3540
9d6ac44e
BP
3541 if (subfacet->actions_len) {
3542 struct dpif_execute *execute = &op->dpif_op.u.execute;
8338659a 3543
9d6ac44e 3544 init_flow_miss_execute_op(miss, packet, op);
6a7e895f 3545 if (!subfacet->slow) {
9d6ac44e
BP
3546 execute->actions = subfacet->actions;
3547 execute->actions_len = subfacet->actions_len;
3548 ofpbuf_uninit(&odp_actions);
3549 } else {
3550 execute->actions = odp_actions.data;
3551 execute->actions_len = odp_actions.size;
3552 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
3553 }
999fba59 3554
9d6ac44e 3555 (*n_ops)++;
5fe20d5d 3556 } else {
9d6ac44e 3557 ofpbuf_uninit(&odp_actions);
5fe20d5d 3558 }
501f8d1f
BP
3559 }
3560
6a7e895f
BP
3561 want_path = subfacet_want_path(subfacet->slow);
3562 if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
501f8d1f 3563 struct flow_miss_op *op = &ops[(*n_ops)++];
c2b565b5 3564 struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
501f8d1f 3565
c84451a6
EJ
3566 subfacet->path = want_path;
3567
5fe20d5d 3568 op->garbage = NULL;
c2b565b5 3569 op->dpif_op.type = DPIF_OP_FLOW_PUT;
501f8d1f
BP
3570 put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
3571 put->key = miss->key;
3572 put->key_len = miss->key_len;
6a7e895f
BP
3573 if (want_path == SF_FAST_PATH) {
3574 put->actions = subfacet->actions;
3575 put->actions_len = subfacet->actions_len;
3576 } else {
3577 compose_slow_path(ofproto, &facet->flow, subfacet->slow,
3578 op->stub, sizeof op->stub,
3579 &put->actions, &put->actions_len);
3580 }
501f8d1f
BP
3581 put->stats = NULL;
3582 }
3583}
3584
acf60855
JP
3585/* Handles flow miss 'miss'. May add any required datapath operations
3586 * to 'ops', incrementing '*n_ops' for each new op. */
9d6ac44e 3587static void
acf60855
JP
3588handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
3589 size_t *n_ops)
9d6ac44e 3590{
acf60855 3591 struct ofproto_dpif *ofproto = miss->ofproto;
9d6ac44e 3592 struct facet *facet;
459b16a1 3593 long long int now;
9d6ac44e
BP
3594 uint32_t hash;
3595
3596 /* The caller must ensure that miss->hmap_node.hash contains
3597 * flow_hash(miss->flow, 0). */
3598 hash = miss->hmap_node.hash;
3599
3600 facet = facet_lookup_valid(ofproto, &miss->flow, hash);
3601 if (!facet) {
c57b2226
BP
3602 struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);
3603
3604 if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
9d6ac44e
BP
3605 handle_flow_miss_without_facet(miss, rule, ops, n_ops);
3606 return;
3607 }
3608
3609 facet = facet_create(rule, &miss->flow, hash);
459b16a1
BP
3610 now = facet->used;
3611 } else {
3612 now = time_msec();
9d6ac44e 3613 }
459b16a1 3614 handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
9d6ac44e
BP
3615}
3616
8f73d537
EJ
3617static struct drop_key *
3618drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
3619 size_t key_len)
3620{
3621 struct drop_key *drop_key;
3622
3623 HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
3624 &backer->drop_keys) {
3625 if (drop_key->key_len == key_len
3626 && !memcmp(drop_key->key, key, key_len)) {
3627 return drop_key;
3628 }
3629 }
3630 return NULL;
3631}
3632
3633static void
3634drop_key_clear(struct dpif_backer *backer)
3635{
3636 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
3637 struct drop_key *drop_key, *next;
3638
3639 HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
3640 int error;
3641
3642 error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
3643 NULL);
3644 if (error && !VLOG_DROP_WARN(&rl)) {
3645 struct ds ds = DS_EMPTY_INITIALIZER;
3646 odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
3647 VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
3648 ds_cstr(&ds));
3649 ds_destroy(&ds);
3650 }
3651
3652 hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
3653 free(drop_key->key);
3654 free(drop_key);
3655 }
3656}
3657
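/* The drop-key table above is a dedup set keyed on raw bytes: hash the key,
 * scan the bucket with memcmp(), and insert only if absent, so each
 * unassociated flow gets its kernel drop flow installed at most once. A
 * standalone sketch of the same pattern with FNV-1a and a fixed-size
 * chained table; all names are illustrative and error handling is omitted: */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    struct entry *next;
    size_t len;
    unsigned char bytes[];          /* Flexible array member holds the key. */
};

#define N_BUCKETS 256
static struct entry *buckets[N_BUCKETS];

static unsigned int
fnv1a(const void *p, size_t n)
{
    const unsigned char *b = p;
    unsigned int h = 2166136261u;
    while (n--) {
        h = (h ^ *b++) * 16777619u;
    }
    return h;
}

/* Returns true if 'key' was newly inserted, false if already present. */
static bool
insert_if_absent(const void *key, size_t len)
{
    struct entry **bucket = &buckets[fnv1a(key, len) % N_BUCKETS];
    for (struct entry *e = *bucket; e; e = e->next) {
        if (e->len == len && !memcmp(e->bytes, key, len)) {
            return false;
        }
    }
    struct entry *e = malloc(sizeof *e + len);
    e->next = *bucket;
    e->len = len;
    memcpy(e->bytes, key, len);
    *bucket = e;
    return true;
}

int
main(void)
{
    return !(insert_if_absent("k1", 2) && !insert_if_absent("k1", 2));
}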
e09ee259
EJ
3658/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
3659 * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
3660 * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
3661 * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto'
3662 * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
3663 * 'packet' ingressed.
e2a6ca36 3664 *
e09ee259
EJ
3665 * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
3666 * 'flow''s in_port to OFPP_NONE.
3667 *
3668 * This function does post-processing on data returned from
3669 * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
3670 * of the upcall processing logic. In particular, if the extracted in_port is
3671 * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
3672 * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
3673 * a VLAN header onto 'packet' (if it is nonnull).
3674 *
c3f6c502
JP
3675 * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci'
3676 * to the VLAN TCI with which the packet was really received, that is, the
3677 * actual VLAN TCI extracted by odp_flow_key_to_flow(). (This differs from
3678 * the value returned in flow->vlan_tci only for packets received on
3679 * VLAN splinters.) Also, if received on an IP tunnel, sets
3680 * 'initial_vals->tunnel_ip_tos' to the tunnel's IP TOS.
e09ee259 3681 *
b9ad7294
EJ
3682 * Similarly, this function includes some logic to help with tunnels. It
3683 * may modify 'flow' as necessary to make the tunneling implementation
3684 * transparent to the upcall processing logic.
3685 *
e09ee259
EJ
3686 * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
3687 * or some other positive errno if there are other problems. */
3688static int
3689ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
3690 const struct nlattr *key, size_t key_len,
3691 struct flow *flow, enum odp_key_fitness *fitnessp,
3692 struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
14f94f9a 3693 struct initial_vals *initial_vals)
e84173dc 3694{
e09ee259
EJ
3695 const struct ofport_dpif *port;
3696 enum odp_key_fitness fitness;
b9ad7294 3697 int error = ENODEV;
e09ee259
EJ
3698
3699 fitness = odp_flow_key_to_flow(key, key_len, flow);
e84173dc 3700 if (fitness == ODP_FIT_ERROR) {
e09ee259
EJ
3701 error = EINVAL;
3702 goto exit;
3703 }
3704
14f94f9a
JP
3705 if (initial_vals) {
3706 initial_vals->vlan_tci = flow->vlan_tci;
c3f6c502 3707 initial_vals->tunnel_ip_tos = flow->tunnel.ip_tos;
e84173dc 3708 }
e84173dc 3709
e09ee259
EJ
3710 if (odp_in_port) {
3711 *odp_in_port = flow->in_port;
3712 }
3713
b9ad7294
EJ
3714 if (tnl_port_should_receive(flow)) {
3715 const struct ofport *ofport = tnl_port_receive(flow);
3716 if (!ofport) {
3717 flow->in_port = OFPP_NONE;
3718 goto exit;
3719 }
3720 port = ofport_dpif_cast(ofport);
e09ee259 3721
b9ad7294
EJ
3722 /* We can't reproduce 'key' from 'flow'. */
3723 fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
e09ee259 3724
b9ad7294
EJ
3725 /* XXX: Since the tunnel module is not scoped per backer, it's
3726 * theoretically possible that we'll receive an ofport belonging to an
3727 * entirely different datapath. In practice, this can't happen because
3728 * no platform has two separate datapaths that each support
3729 * tunneling. */
3730 ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
3731 } else {
3732 port = odp_port_to_ofport(backer, flow->in_port);
3733 if (!port) {
3734 flow->in_port = OFPP_NONE;
3735 goto exit;
3736 }
3737
3738 flow->in_port = port->up.ofp_port;
3739 if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
3740 if (packet) {
3741 /* Make the packet resemble the flow, so that it gets sent to
3742 * an OpenFlow controller properly, so that it looks correct
3743 * for sFlow, and so that flow_extract() will get the correct
3744 * vlan_tci if it is called on 'packet'.
3745 *
3746 * The allocated space inside 'packet' probably also contains
3747 * 'key', that is, both 'packet' and 'key' are probably part of
3748 * a struct dpif_upcall (see the large comment on that
3749 * structure definition), so pushing data on 'packet' is in
3750 * general not a good idea since it could overwrite 'key' or
3751 * free it as a side effect. However, it's OK in this special
3752 * case because we know that 'packet' is inside a Netlink
3753 * attribute: pushing 4 bytes will just overwrite the 4-byte
3754 * "struct nlattr", which is fine since we don't need that
3755 * header anymore. */
3756 eth_push_vlan(packet, flow->vlan_tci);
3757 }
3758 /* We can't reproduce 'key' from 'flow'. */
3759 fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
52a90c29
BP
3760 }
3761 }
e09ee259 3762 error = 0;
52a90c29 3763
b9ad7294
EJ
3764 if (ofproto) {
3765 *ofproto = ofproto_dpif_cast(port->up.ofproto);
3766 }
3767
e09ee259
EJ
3768exit:
3769 if (fitnessp) {
3770 *fitnessp = fitness;
3771 }
3772 return error;
e84173dc
BP
3773}
3774
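/* ofproto_receive() above follows a shape worth noting: optional output
 * parameters that are filled in only when nonnull, plus a single "exit"
 * label so that the always-produced output ('fitnessp') is handled in one
 * place no matter which early-error path fires. A standalone sketch of the
 * same shape with illustrative names: */
#include <errno.h>
#include <stddef.h>

static int
parse_uint(const char *s, int *valuep, int *lengthp)
{
    int error = 0;
    int length = 0;
    int value = 0;

    if (!s || !*s) {
        error = EINVAL;
        goto exit;              /* Error paths still reach the label. */
    }
    for (; *s >= '0' && *s <= '9'; s++, length++) {
        value = value * 10 + (*s - '0');
    }
    if (valuep) {               /* Optional output: fill only if requested. */
        *valuep = value;
    }

exit:
    if (lengthp) {              /* Always-produced output, like 'fitnessp'. */
        *lengthp = length;
    }
    return error;
}

int
main(void)
{
    int value;
    return parse_uint("42", &value, NULL) || value != 42;
}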
501f8d1f 3775static void
acf60855 3776handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
501f8d1f
BP
3777 size_t n_upcalls)
3778{
3779 struct dpif_upcall *upcall;
b23cdad9
BP
3780 struct flow_miss *miss;
3781 struct flow_miss misses[FLOW_MISS_MAX_BATCH];
501f8d1f 3782 struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
c2b565b5 3783 struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
501f8d1f 3784 struct hmap todo;
b23cdad9 3785 int n_misses;
501f8d1f
BP
3786 size_t n_ops;
3787 size_t i;
3788
3789 if (!n_upcalls) {
3790 return;
3791 }
3792
3793 /* Construct the to-do list.
3794 *
3795 * This just amounts to extracting the flow from each packet and sticking
3796 * the packets that have the same flow in the same "flow_miss" structure so
3797 * that we can process them together. */
3798 hmap_init(&todo);
b23cdad9 3799 n_misses = 0;
501f8d1f 3800 for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
b23cdad9
BP
3801 struct flow_miss *miss = &misses[n_misses];
3802 struct flow_miss *existing_miss;
acf60855 3803 struct ofproto_dpif *ofproto;
a088a1ff 3804 uint32_t odp_in_port;
1d446463 3805 struct flow flow;
b23cdad9 3806 uint32_t hash;
e09ee259 3807 int error;
501f8d1f 3808
e09ee259
EJ
3809 error = ofproto_receive(backer, upcall->packet, upcall->key,
3810 upcall->key_len, &flow, &miss->key_fitness,
14f94f9a 3811 &ofproto, &odp_in_port, &miss->initial_vals);
e09ee259 3812 if (error == ENODEV) {
8f73d537
EJ
3813 struct drop_key *drop_key;
3814
acf60855
JP
3815            /* Received a packet on a port with which we couldn't associate
3816             * an ofproto.  This can happen if a port is removed while
3817 * traffic is being received. Print a rate-limited message
8f73d537
EJ
3818 * in case it happens frequently. Install a drop flow so
3819 * that future packets of the flow are inexpensively dropped
3820 * in the kernel. */
acf60855
JP
3821 VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
3822 flow.in_port);
8f73d537
EJ
3823
3824 drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
3825 if (!drop_key) {
3826 drop_key = xmalloc(sizeof *drop_key);
3827 drop_key->key = xmemdup(upcall->key, upcall->key_len);
3828 drop_key->key_len = upcall->key_len;
3829
3830 hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
3831 hash_bytes(drop_key->key, drop_key->key_len, 0));
3832 dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
3833 drop_key->key, drop_key->key_len, NULL, 0, NULL);
3834 }
3835 continue;
acf60855 3836 }
e09ee259 3837 if (error) {
b0f7b9b5
BP
3838 continue;
3839 }
735d7efb
AZ
3840
3841 ofproto->n_missed++;
72e8bf28 3842 flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
1d446463 3843 &flow.tunnel, flow.in_port, &miss->flow);
501f8d1f 3844
501f8d1f 3845 /* Add other packets to a to-do list. */
b23cdad9 3846 hash = flow_hash(&miss->flow, 0);
ddbc5954 3847 existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
b23cdad9
BP
3848 if (!existing_miss) {
3849 hmap_insert(&todo, &miss->hmap_node, hash);
acf60855 3850 miss->ofproto = ofproto;
b23cdad9
BP
3851 miss->key = upcall->key;
3852 miss->key_len = upcall->key_len;
6a7e895f 3853 miss->upcall_type = upcall->type;
a088a1ff 3854 miss->odp_in_port = odp_in_port;
b23cdad9
BP
3855 list_init(&miss->packets);
3856
3857 n_misses++;
3858 } else {
3859 miss = existing_miss;
3860 }
501f8d1f
BP
3861 list_push_back(&miss->packets, &upcall->packet->list_node);
3862 }
3863
3864 /* Process each element in the to-do list, constructing the set of
3865 * operations to batch. */
3866 n_ops = 0;
33bb0caa 3867 HMAP_FOR_EACH (miss, hmap_node, &todo) {
acf60855 3868 handle_flow_miss(miss, flow_miss_ops, &n_ops);
abe529af 3869 }
cb22974d 3870 ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
501f8d1f
BP
3871
3872 /* Execute batch. */
3873 for (i = 0; i < n_ops; i++) {
3874 dpif_ops[i] = &flow_miss_ops[i].dpif_op;
3875 }
acf60855 3876 dpif_operate(backer->dpif, dpif_ops, n_ops);
501f8d1f 3877
c84451a6 3878 /* Free memory. */
501f8d1f 3879 for (i = 0; i < n_ops; i++) {
c84451a6 3880 free(flow_miss_ops[i].garbage);
501f8d1f 3881 }
33bb0caa 3882 hmap_destroy(&todo);
abe529af
BP
3883}
3884
6a7e895f
BP
3885static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
3886classify_upcall(const struct dpif_upcall *upcall)
3887{
3888 union user_action_cookie cookie;
3889
3890 /* First look at the upcall type. */
3891 switch (upcall->type) {
3892 case DPIF_UC_ACTION:
3893 break;
3894
3895 case DPIF_UC_MISS:
3896 return MISS_UPCALL;
3897
3898 case DPIF_N_UC_TYPES:
3899 default:
3900 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type);
3901 return BAD_UPCALL;
3902 }
3903
3904 /* "action" upcalls need a closer look. */
e995e3df
BP
3905 if (!upcall->userdata) {
3906 VLOG_WARN_RL(&rl, "action upcall missing cookie");
3907 return BAD_UPCALL;
3908 }
3909 if (nl_attr_get_size(upcall->userdata) != sizeof(cookie)) {
3910 VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
3911 nl_attr_get_size(upcall->userdata));
3912 return BAD_UPCALL;
3913 }
3914 memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
6a7e895f
BP
3915 switch (cookie.type) {
3916 case USER_ACTION_COOKIE_SFLOW:
3917 return SFLOW_UPCALL;
3918
3919 case USER_ACTION_COOKIE_SLOW_PATH:
3920 return MISS_UPCALL;
3921
3922 case USER_ACTION_COOKIE_UNSPEC:
3923 default:
e995e3df
BP
3924        VLOG_WARN_RL(&rl, "invalid user cookie: 0x%"PRIx64,
3925 nl_attr_get_u64(upcall->userdata));
6a7e895f
BP
3926 return BAD_UPCALL;
3927 }
3928}
3929
abe529af 3930static void
acf60855 3931handle_sflow_upcall(struct dpif_backer *backer,
6a7e895f 3932 const struct dpif_upcall *upcall)
abe529af 3933{
acf60855 3934 struct ofproto_dpif *ofproto;
1673e0e4 3935 union user_action_cookie cookie;
e84173dc 3936 struct flow flow;
e1b1d06a 3937 uint32_t odp_in_port;
abe529af 3938
e09ee259
EJ
3939 if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
3940 &flow, NULL, &ofproto, &odp_in_port, NULL)
3941 || !ofproto->sflow) {
e84173dc
BP
3942 return;
3943 }
3944
e995e3df 3945 memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
e1b1d06a
JP
3946 dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
3947 odp_in_port, &cookie);
6ff686f2
PS
3948}
3949
9b16c439 3950static int
acf60855 3951handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
6ff686f2 3952{
9b16c439 3953 struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
90a7c55e
BP
3954 struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
3955 uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
3956 int n_processed;
9b16c439
BP
3957 int n_misses;
3958 int i;
abe529af 3959
cb22974d 3960 ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);
abe529af 3961
9b16c439 3962 n_misses = 0;
90a7c55e 3963 for (n_processed = 0; n_processed < max_batch; n_processed++) {
9b16c439 3964 struct dpif_upcall *upcall = &misses[n_misses];
90a7c55e 3965 struct ofpbuf *buf = &miss_bufs[n_misses];
9b16c439
BP
3966 int error;
3967
90a7c55e
BP
3968 ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
3969 sizeof miss_buf_stubs[n_misses]);
acf60855 3970 error = dpif_recv(backer->dpif, upcall, buf);
9b16c439 3971 if (error) {
90a7c55e 3972 ofpbuf_uninit(buf);
9b16c439
BP
3973 break;
3974 }
3975
6a7e895f
BP
3976 switch (classify_upcall(upcall)) {
3977 case MISS_UPCALL:
9b16c439
BP
3978 /* Handle it later. */
3979 n_misses++;
3980 break;
3981
6a7e895f 3982 case SFLOW_UPCALL:
acf60855 3983 handle_sflow_upcall(backer, upcall);
6a7e895f
BP
3984 ofpbuf_uninit(buf);
3985 break;
3986
3987 case BAD_UPCALL:
3988 ofpbuf_uninit(buf);
9b16c439
BP
3989 break;
3990 }
abe529af 3991 }
9b16c439 3992
6a7e895f 3993 /* Handle deferred MISS_UPCALL processing. */
acf60855 3994 handle_miss_upcalls(backer, misses, n_misses);
90a7c55e
BP
3995 for (i = 0; i < n_misses; i++) {
3996 ofpbuf_uninit(&miss_bufs[i]);
3997 }
9b16c439 3998
90a7c55e 3999 return n_processed;
abe529af
BP
4000}
4001\f
4002/* Flow expiration. */
4003
b0f7b9b5 4004static int subfacet_max_idle(const struct ofproto_dpif *);
acf60855 4005static void update_stats(struct dpif_backer *);
abe529af 4006static void rule_expire(struct rule_dpif *);
b0f7b9b5 4007static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
abe529af
BP
4008
4009/* This function is called periodically by run(). Its job is to collect
4010 * updates for the flows that have been installed into the datapath, most
4011 * importantly when they last were used, and then use that information to
4012 * expire flows that have not been used recently.
4013 *
4014 * Returns the number of milliseconds after which it should be called again. */
4015static int
acf60855 4016expire(struct dpif_backer *backer)
abe529af 4017{
acf60855
JP
4018 struct ofproto_dpif *ofproto;
4019 int max_idle = INT32_MAX;
abe529af 4020
8f73d537
EJ
4021 /* Periodically clear out the drop keys in an effort to keep them
4022 * relatively few. */
4023 drop_key_clear(backer);
4024
acf60855
JP
4025 /* Update stats for each flow in the backer. */
4026 update_stats(backer);
abe529af 4027
acf60855 4028 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
e503cc19 4029 struct rule *rule, *next_rule;
acf60855 4030 int dp_max_idle;
abe529af 4031
acf60855
JP
4032 if (ofproto->backer != backer) {
4033 continue;
4034 }
0697b5c3 4035
acf60855
JP
4036 /* Expire subfacets that have been idle too long. */
4037 dp_max_idle = subfacet_max_idle(ofproto);
4038 expire_subfacets(ofproto, dp_max_idle);
4039
4040 max_idle = MIN(max_idle, dp_max_idle);
4041
4042 /* Expire OpenFlow flows whose idle_timeout or hard_timeout
4043 * has passed. */
e503cc19
SH
4044 LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
4045 &ofproto->up.expirable) {
4046 rule_expire(rule_dpif_cast(rule));
0697b5c3 4047 }
abe529af 4048
acf60855
JP
4049 /* All outstanding data in existing flows has been accounted, so it's a
4050 * good time to do bond rebalancing. */
4051 if (ofproto->has_bonded_bundles) {
4052 struct ofbundle *bundle;
abe529af 4053
acf60855
JP
4054 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
4055 if (bundle->bond) {
2cc3c58e 4056 bond_rebalance(bundle->bond, &backer->revalidate_set);
acf60855 4057 }
abe529af
BP
4058 }
4059 }
4060 }
4061
acf60855 4062 return MIN(max_idle, 1000);
abe529af
BP
4063}
4064
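/* expire() reports how soon it wants to run again, and the caller can sleep
 * no longer than the smallest such timer before polling again. A standalone
 * sketch of that cooperative-timer pattern; the task functions are
 * illustrative: */
#include <stdio.h>

static int
task_expire(void)               /* Each task returns ms until its next run. */
{
    return 1000;
}

static int
task_stats(void)
{
    return 250;
}

int
main(void)
{
    for (int iter = 0; iter < 3; iter++) {
        int timeout = task_expire();
        int t = task_stats();
        if (t < timeout) {
            timeout = t;        /* Sleep only as long as the soonest task. */
        }
        printf("would poll for %d ms\n", timeout);
        /* A real loop would block here, e.g. in poll(), for 'timeout' ms. */
    }
    return 0;
}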
a218c879
BP
4065/* Updates flow table statistics given that the datapath just reported 'stats'
4066 * as 'subfacet''s statistics. */
4067static void
4068update_subfacet_stats(struct subfacet *subfacet,
4069 const struct dpif_flow_stats *stats)
4070{
4071 struct facet *facet = subfacet->facet;
4072
4073 if (stats->n_packets >= subfacet->dp_packet_count) {
4074 uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
4075 facet->packet_count += extra;
4076 } else {
4077 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
4078 }
4079
4080 if (stats->n_bytes >= subfacet->dp_byte_count) {
4081 facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
4082 } else {
4083        VLOG_WARN_RL(&rl, "unexpected byte count from the datapath");
4084 }
4085
4086 subfacet->dp_packet_count = stats->n_packets;
4087 subfacet->dp_byte_count = stats->n_bytes;
4088
4089 facet->tcp_flags |= stats->tcp_flags;
4090
4091 subfacet_update_time(subfacet, stats->used);
4092 if (facet->accounted_bytes < facet->byte_count) {
4093 facet_learn(facet);
4094 facet_account(facet);
4095 facet->accounted_bytes = facet->byte_count;
4096 }
4097 facet_push_stats(facet);
4098}
4099
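/* update_subfacet_stats() treats the datapath's counters as cumulative: it
 * folds in only the delta since the last report and refuses to underflow if
 * a counter ever appears to run backwards. A standalone sketch of that
 * idiom: */
#include <stdint.h>
#include <stdio.h>

struct counter {
    uint64_t last_seen;         /* Last cumulative value reported. */
    uint64_t total;             /* Deltas accumulated so far. */
};

static void
counter_update(struct counter *c, uint64_t reported)
{
    if (reported >= c->last_seen) {
        c->total += reported - c->last_seen;
    } else {
        fprintf(stderr, "unexpected counter decrease\n");
    }
    c->last_seen = reported;    /* Resync even after a decrease. */
}

int
main(void)
{
    struct counter c = { 0, 0 };
    counter_update(&c, 10);
    counter_update(&c, 25);     /* total == 25 */
    counter_update(&c, 5);      /* warns; resyncs last_seen to 5 */
    counter_update(&c, 8);      /* total == 28 */
    printf("%llu\n", (unsigned long long) c.total);
    return c.total != 28;
}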
4100/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
4101 * about, or a flow that shouldn't be installed but was anyway. Delete it. */
4102static void
acf60855 4103delete_unexpected_flow(struct ofproto_dpif *ofproto,
a218c879
BP
4104 const struct nlattr *key, size_t key_len)
4105{
4106 if (!VLOG_DROP_WARN(&rl)) {
4107 struct ds s;
4108
4109 ds_init(&s);
4110 odp_flow_key_format(key, key_len, &s);
acf60855 4111 VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
a218c879
BP
4112 ds_destroy(&s);
4113 }
4114
4115 COVERAGE_INC(facet_unexpected);
acf60855 4116 dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
a218c879
BP
4117}
4118
abe529af
BP
4119/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
4120 *
4121 * This function also pushes statistics updates to rules which each facet
4122 * resubmits into. Generally these statistics will be accurate. However, if a
4123 * facet changes the rule it resubmits into at some time in between
4124 * update_stats() runs, it is possible that statistics accrued to the
4125 * old rule will be incorrectly attributed to the new rule. This could be
4126 * avoided by calling update_stats() whenever rules are created or
4127 * avoided by calling update_stats() whenever rules are created or deleted.
4128 * However, the performance impact of making so many calls to the datapath is
 * not justified by the benefit of having perfectly accurate statistics.
735d7efb
AZ
4129 *
4130 * In addition, this function maintains per-ofproto flow hit counts. The patch
4131 * port is not treated specially: for example, a packet that ingresses on br0
4132 * and is patched into br1 increases the hit count of br0 by 1 but does not
4133 * affect the hit or miss counts of br1.
abe529af
BP
4134 */
4135static void
acf60855 4136update_stats(struct dpif_backer *backer)
abe529af
BP
4137{
4138 const struct dpif_flow_stats *stats;
4139 struct dpif_flow_dump dump;
4140 const struct nlattr *key;
4141 size_t key_len;
4142
acf60855 4143 dpif_flow_dump_start(&dump, backer->dpif);
abe529af 4144 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
acf60855 4145 struct flow flow;
b0f7b9b5 4146 struct subfacet *subfacet;
acf60855 4147 struct ofproto_dpif *ofproto;
b9ad7294 4148 struct ofport_dpif *ofport;
acf60855 4149 uint32_t key_hash;
abe529af 4150
58c6adda
EJ
4151 if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
4152 NULL, NULL)) {
acf60855
JP
4153 continue;
4154 }
4155
b9ad7294
EJ
4156 ofport = get_ofp_port(ofproto, flow.in_port);
4157 if (ofport && ofport->tnl_port) {
4158 netdev_vport_inc_rx(ofport->up.netdev, stats);
4159 }
4160
acf60855 4161 key_hash = odp_flow_key_hash(key, key_len);
9566abf9 4162 subfacet = subfacet_find(ofproto, key, key_len, key_hash);
6a7e895f
BP
4163 switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
4164 case SF_FAST_PATH:
735d7efb
AZ
4165 /* Update ofproto_dpif's hit count. */
4166 if (stats->n_packets > subfacet->dp_packet_count) {
4167 uint64_t delta = stats->n_packets - subfacet->dp_packet_count;
4168 dpif_stats_update_hit_count(ofproto, delta);
4169 }
4170
a218c879 4171 update_subfacet_stats(subfacet, stats);
6a7e895f
BP
4172 break;
4173
4174 case SF_SLOW_PATH:
4175 /* Stats are updated per-packet. */
4176 break;
4177
4178 case SF_NOT_INSTALLED:
4179 default:
acf60855 4180 delete_unexpected_flow(ofproto, key, key_len);
6a7e895f 4181 break;
abe529af
BP
4182 }
4183 }
4184 dpif_flow_dump_done(&dump);
4185}
4186
4187/* Calculates and returns the number of milliseconds of idle time after which
b0f7b9b5
BP
4188 * subfacets should expire from the datapath. When a subfacet expires, we fold
4189 * its statistics into its facet, and when a facet's last subfacet expires, we
4190 * fold its statistic into its rule. */
abe529af 4191static int
b0f7b9b5 4192subfacet_max_idle(const struct ofproto_dpif *ofproto)
abe529af
BP
4193{
4194 /*
4195 * Idle time histogram.
4196 *
b0f7b9b5
BP
4197 * Most of the time a switch has a relatively small number of subfacets.
4198 * When this is the case we might as well keep statistics for all of them
4199 * in userspace and cache them in the kernel datapath for performance as
abe529af
BP
4200 * well.
4201 *
b0f7b9b5 4202 * As the number of subfacets increases, the memory required to maintain
abe529af 4203 * statistics about them in userspace and in the kernel becomes
b0f7b9b5
BP
4204 * significant. However, with a large number of subfacets it is likely
4205 * that only a few of them are "heavy hitters" that consume a large amount
4206 * of bandwidth. At this point, only heavy hitters are worth caching in
4207 * the kernel and maintaining in userspace; other subfacets we can
4208 * discard.
abe529af
BP
4209 *
4210 * The technique used to compute the idle time is to build a histogram with
b0f7b9b5 4211 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet
abe529af
BP
4212 * that is installed in the kernel gets dropped in the appropriate bucket.
4213 * After the histogram has been built, we compute the cutoff so that only
b0f7b9b5 4214 * the most-recently-used 1% of subfacets (but at least
084f5290 4215 * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
b0f7b9b5
BP
4216 * the most-recently-used bucket of subfacets is kept, so actually an
4217 * arbitrary number of subfacets can be kept in any given expiration run
084f5290
SH
4218 * (though the next run will delete most of those unless they receive
4219 * additional data).
abe529af 4220 *
b0f7b9b5
BP
4221 * This requires a second pass through the subfacets, in addition to the
4222 * pass made by update_stats(), because update_stats() never looks at
4223 * uninstallable subfacets.
abe529af
BP
4224 */
4225 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
4226 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
4227 int buckets[N_BUCKETS] = { 0 };
f11c1ef4 4228 int total, subtotal, bucket;
b0f7b9b5 4229 struct subfacet *subfacet;
abe529af
BP
4230 long long int now;
4231 int i;
4232
b0f7b9b5 4233 total = hmap_count(&ofproto->subfacets);
084f5290 4234 if (total <= ofproto->up.flow_eviction_threshold) {
abe529af
BP
4235 return N_BUCKETS * BUCKET_WIDTH;
4236 }
4237
4238 /* Build histogram. */
4239 now = time_msec();
b0f7b9b5
BP
4240 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
4241 long long int idle = now - subfacet->used;
abe529af
BP
4242 int bucket = (idle <= 0 ? 0
4243 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
4244 : (unsigned int) idle / BUCKET_WIDTH);
4245 buckets[bucket]++;
4246 }
4247
4248 /* Find the first bucket whose flows should be expired. */
f11c1ef4
SH
4249 subtotal = bucket = 0;
4250 do {
4251 subtotal += buckets[bucket++];
084f5290
SH
4252 } while (bucket < N_BUCKETS &&
4253 subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
abe529af
BP
4254
4255 if (VLOG_IS_DBG_ENABLED()) {
4256 struct ds s;
4257
4258 ds_init(&s);
4259 ds_put_cstr(&s, "keep");
4260 for (i = 0; i < N_BUCKETS; i++) {
4261 if (i == bucket) {
4262 ds_put_cstr(&s, ", drop");
4263 }
4264 if (buckets[i]) {
4265 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
4266 }
4267 }
4268 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
4269 ds_destroy(&s);
4270 }
4271
4272 return bucket * BUCKET_WIDTH;
4273}
4274
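/* The long comment in subfacet_max_idle() describes the whole algorithm;
 * here is a standalone sketch of its core: bucket the idle times, then
 * advance the cutoff bucket until at least max(threshold, 1% of total)
 * entries would be kept. The inputs below are illustrative: */
#include <stdio.h>

#define BUCKET_WIDTH 100                /* ms per bucket */
#define N_BUCKETS    50

int
main(void)
{
    long long idle_times[] = { 10, 20, 30, 250, 260, 4900 };    /* ms */
    int n = sizeof idle_times / sizeof *idle_times;
    int threshold = 4;                  /* Always keep at least this many. */
    int buckets[N_BUCKETS] = { 0 };

    for (int i = 0; i < n; i++) {
        long long idle = idle_times[i];
        int b = (idle <= 0 ? 0
                 : idle >= (long long) BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
                 : (int) (idle / BUCKET_WIDTH));
        buckets[b]++;
    }

    /* Find the first bucket whose entries should expire. */
    int keep = threshold > n / 100 ? threshold : n / 100;
    int subtotal = 0, bucket = 0;
    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS && subtotal < keep);

    printf("max idle: %d ms\n", bucket * BUCKET_WIDTH);     /* -> 300 ms */
    return 0;
}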
abe529af 4275static void
b0f7b9b5 4276expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
abe529af 4277{
625b0720
BP
4278 /* Cutoff time for most flows. */
4279 long long int normal_cutoff = time_msec() - dp_max_idle;
4280
4281 /* We really want to keep flows for special protocols around, so use a more
4282 * conservative cutoff. */
4283 long long int special_cutoff = time_msec() - 10000;
b99d3cee 4284
b0f7b9b5 4285 struct subfacet *subfacet, *next_subfacet;
1d85f9e5 4286 struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
b99d3cee 4287 int n_batch;
abe529af 4288
b99d3cee 4289 n_batch = 0;
b0f7b9b5
BP
4290 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
4291 &ofproto->subfacets) {
625b0720
BP
4292 long long int cutoff;
4293
4294 cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
4295 ? special_cutoff
4296 : normal_cutoff);
b0f7b9b5 4297 if (subfacet->used < cutoff) {
6a7e895f 4298 if (subfacet->path != SF_NOT_INSTALLED) {
b99d3cee 4299 batch[n_batch++] = subfacet;
1d85f9e5
JP
4300 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
4301 subfacet_destroy_batch(ofproto, batch, n_batch);
b99d3cee
BP
4302 n_batch = 0;
4303 }
4304 } else {
4305 subfacet_destroy(subfacet);
4306 }
abe529af
BP
4307 }
4308 }
b99d3cee
BP
4309
4310 if (n_batch > 0) {
1d85f9e5 4311 subfacet_destroy_batch(ofproto, batch, n_batch);
b99d3cee 4312 }
abe529af
BP
4313}
4314
4315/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
4316 * then delete it entirely. */
4317static void
4318rule_expire(struct rule_dpif *rule)
4319{
abe529af
BP
4320 struct facet *facet, *next_facet;
4321 long long int now;
4322 uint8_t reason;
4323
e2a3d183
BP
4324 if (rule->up.pending) {
4325 /* We'll have to expire it later. */
4326 return;
4327 }
4328
abe529af
BP
4329 /* Has 'rule' expired? */
4330 now = time_msec();
4331 if (rule->up.hard_timeout
308881af 4332 && now > rule->up.modified + rule->up.hard_timeout * 1000) {
abe529af 4333 reason = OFPRR_HARD_TIMEOUT;
8ea6ac3e 4334 } else if (rule->up.idle_timeout
1745cd08 4335 && now > rule->up.used + rule->up.idle_timeout * 1000) {
abe529af
BP
4336 reason = OFPRR_IDLE_TIMEOUT;
4337 } else {
4338 return;
4339 }
4340
4341 COVERAGE_INC(ofproto_dpif_expired);
4342
4343 /* Update stats. (This is a no-op if the rule expired due to an idle
4344 * timeout, because that only happens when the rule has no facets left.) */
4345 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 4346 facet_remove(facet);
abe529af
BP
4347 }
4348
4349 /* Get rid of the rule. */
4350 ofproto_rule_expire(&rule->up, reason);
4351}
4352\f
4353/* Facets. */
4354
f3827897 4355/* Creates and returns a new facet owned by 'rule', given a 'flow'.
abe529af
BP
4356 *
4357 * The caller must already have determined that no facet with an identical
4358 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
f3827897
BP
4359 * the ofproto's classifier table.
4360 *
2b459b83
BP
4361 * 'hash' must be the return value of flow_hash(flow, 0).
4362 *
b0f7b9b5
BP
4363 * The facet will initially have no subfacets. The caller should create (at
4364 * least) one subfacet with subfacet_create(). */
abe529af 4365static struct facet *
2b459b83 4366facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
abe529af
BP
4367{
4368 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4369 struct facet *facet;
4370
4371 facet = xzalloc(sizeof *facet);
4372 facet->used = time_msec();
2b459b83 4373 hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
abe529af
BP
4374 list_push_back(&rule->facets, &facet->list_node);
4375 facet->rule = rule;
4376 facet->flow = *flow;
b0f7b9b5 4377 list_init(&facet->subfacets);
abe529af
BP
4378 netflow_flow_init(&facet->nf_flow);
4379 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
4380
6cf474d7
EJ
4381 facet->learn_rl = time_msec() + 500;
4382
abe529af
BP
4383 return facet;
4384}
4385
4386static void
4387facet_free(struct facet *facet)
4388{
abe529af
BP
4389 free(facet);
4390}
4391
3d9e05f8 4392/* Executes, within 'ofproto', the datapath actions in 'odp_actions' (of byte
0a740f48 4393 * length 'actions_len') on 'packet', whose extracted flow is 'flow'. */
3d9e05f8
BP
4394static bool
4395execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
4396 const struct nlattr *odp_actions, size_t actions_len,
4397 struct ofpbuf *packet)
4398{
4399 struct odputil_keybuf keybuf;
4400 struct ofpbuf key;
4401 int error;
4402
6ff686f2 4403 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
4404 odp_flow_key_from_flow(&key, flow,
4405 ofp_port_to_odp_port(ofproto, flow->in_port));
80e5eed9 4406
acf60855 4407 error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
6ff686f2 4408 odp_actions, actions_len, packet);
6ff686f2 4409 return !error;
abe529af
BP
4410}
4411
abe529af
BP
4412/* Remove 'facet' from 'ofproto' and free up the associated memory:
4413 *
4414 * - If 'facet' was installed in the datapath, uninstalls it and updates its
b0f7b9b5 4415 * rule's statistics, via subfacet_uninstall().
abe529af
BP
4416 *
4417 * - Removes 'facet' from its rule and from ofproto->facets.
4418 */
4419static void
15baa734 4420facet_remove(struct facet *facet)
abe529af 4421{
15baa734 4422 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
4423 struct subfacet *subfacet, *next_subfacet;
4424
cb22974d 4425 ovs_assert(!list_is_empty(&facet->subfacets));
551a2f6c
BP
4426
4427 /* First uninstall all of the subfacets to get final statistics. */
4428 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
15baa734 4429 subfacet_uninstall(subfacet);
551a2f6c
BP
4430 }
4431
4432 /* Flush the final stats to the rule.
4433 *
4434 * This might require us to have at least one subfacet around so that we
4435 * can use its actions for accounting in facet_account(), which is why we
4436 * have uninstalled but not yet destroyed the subfacets. */
15baa734 4437 facet_flush_stats(facet);
551a2f6c
BP
4438
4439 /* Now we're really all done so destroy everything. */
b0f7b9b5
BP
4440 LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
4441 &facet->subfacets) {
15baa734 4442 subfacet_destroy__(subfacet);
b0f7b9b5 4443 }
abe529af
BP
4444 hmap_remove(&ofproto->facets, &facet->hmap_node);
4445 list_remove(&facet->list_node);
4446 facet_free(facet);
4447}
4448
3de9590b
BP
4449/* Feed information from 'facet' back into the learning table to keep it in
4450 * sync with what is actually flowing through the datapath. */
abe529af 4451static void
3de9590b 4452facet_learn(struct facet *facet)
abe529af 4453{
15baa734 4454 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
14f94f9a
JP
4455    struct subfacet *subfacet = CONTAINER_OF(list_front(&facet->subfacets),
4456 struct subfacet, list_node);
3de9590b 4457 struct action_xlate_ctx ctx;
abe529af 4458
6cf474d7
EJ
4459 if (time_msec() < facet->learn_rl) {
4460 return;
4461 }
4462
4463 facet->learn_rl = time_msec() + 500;
4464
3de9590b
BP
4465 if (!facet->has_learn
4466 && !facet->has_normal
4467 && (!facet->has_fin_timeout
4468 || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
abe529af
BP
4469 return;
4470 }
abe529af 4471
3de9590b 4472 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
14f94f9a 4473 &subfacet->initial_vals,
3de9590b
BP
4474 facet->rule, facet->tcp_flags, NULL);
4475 ctx.may_learn = true;
f25d0cf3
BP
4476 xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
4477 facet->rule->up.ofpacts_len);
3de9590b
BP
4478}
4479
4480static void
4481facet_account(struct facet *facet)
4482{
4483 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
1a1e3a0a 4484 struct subfacet *subfacet = facet_get_subfacet(facet);
3de9590b
BP
4485 const struct nlattr *a;
4486 unsigned int left;
4487 ovs_be16 vlan_tci;
4488 uint64_t n_bytes;
abe529af 4489
75a75043 4490 if (!facet->has_normal || !ofproto->has_bonded_bundles) {
abe529af
BP
4491 return;
4492 }
3de9590b 4493 n_bytes = facet->byte_count - facet->accounted_bytes;
d78be13b
BP
4494
4495 /* This loop feeds byte counters to bond_account() for rebalancing to use
4496 * as a basis. We also need to track the actual VLAN on which the packet
4497 * is going to be sent to ensure that it matches the one passed to
4498 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
b95fc6ba
BP
4499 * hash bucket.)
4500 *
4501 * We use the actions from an arbitrary subfacet because they should all
4502 * be equally valid for our purpose. */
d78be13b 4503 vlan_tci = facet->flow.vlan_tci;
b95fc6ba
BP
4504 NL_ATTR_FOR_EACH_UNSAFE (a, left,
4505 subfacet->actions, subfacet->actions_len) {
fea393b1 4506 const struct ovs_action_push_vlan *vlan;
d78be13b 4507 struct ofport_dpif *port;
abe529af 4508
d78be13b 4509 switch (nl_attr_type(a)) {
df2c07f4 4510 case OVS_ACTION_ATTR_OUTPUT:
abe529af
BP
4511 port = get_odp_port(ofproto, nl_attr_get_u32(a));
4512 if (port && port->bundle && port->bundle->bond) {
d78be13b 4513 bond_account(port->bundle->bond, &facet->flow,
dc155bff 4514 vlan_tci_to_vid(vlan_tci), n_bytes);
abe529af 4515 }
d78be13b
BP
4516 break;
4517
fea393b1
BP
4518 case OVS_ACTION_ATTR_POP_VLAN:
4519 vlan_tci = htons(0);
d78be13b
BP
4520 break;
4521
fea393b1
BP
4522 case OVS_ACTION_ATTR_PUSH_VLAN:
4523 vlan = nl_attr_get(a);
4524 vlan_tci = vlan->vlan_tci;
d78be13b 4525 break;
abe529af
BP
4526 }
4527 }
4528}
4529
abe529af
BP
4530/* Returns true if the only action for 'facet' is to send to the controller.
4531 * (We don't report NetFlow expiration messages for such facets because they
4532 * are just part of the control logic for the network, not real traffic). */
4533static bool
4534facet_is_controller_flow(struct facet *facet)
4535{
f25d0cf3
BP
4536 if (facet) {
4537 const struct rule *rule = &facet->rule->up;
4538 const struct ofpact *ofpacts = rule->ofpacts;
4539 size_t ofpacts_len = rule->ofpacts_len;
4540
dd30ff28
BP
4541 if (ofpacts_len > 0 &&
4542 ofpacts->type == OFPACT_CONTROLLER &&
f25d0cf3
BP
4543 ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
4544 return true;
4545 }
4546 }
4547 return false;
abe529af
BP
4548}
4549
4550/* Folds all of 'facet''s statistics into its rule. Also updates the
4551 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
4552 * 'facet''s statistics in the datapath should have been zeroed and folded into
4553 * its packet and byte counts before this function is called. */
4554static void
15baa734 4555facet_flush_stats(struct facet *facet)
abe529af 4556{
15baa734 4557 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b0f7b9b5
BP
4558 struct subfacet *subfacet;
4559
4560 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
cb22974d
BP
4561 ovs_assert(!subfacet->dp_byte_count);
4562 ovs_assert(!subfacet->dp_packet_count);
b0f7b9b5 4563 }
abe529af
BP
4564
4565 facet_push_stats(facet);
3de9590b
BP
4566 if (facet->accounted_bytes < facet->byte_count) {
4567 facet_account(facet);
4568 facet->accounted_bytes = facet->byte_count;
4569 }
abe529af
BP
4570
4571 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
4572 struct ofexpired expired;
4573 expired.flow = facet->flow;
4574 expired.packet_count = facet->packet_count;
4575 expired.byte_count = facet->byte_count;
4576 expired.used = facet->used;
4577 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
4578 }
4579
4580 facet->rule->packet_count += facet->packet_count;
4581 facet->rule->byte_count += facet->byte_count;
4582
4583 /* Reset counters to prevent double counting if 'facet' ever gets
4584 * reinstalled. */
bbb5d219 4585 facet_reset_counters(facet);
abe529af
BP
4586
4587 netflow_flow_clear(&facet->nf_flow);
0e553d9c 4588 facet->tcp_flags = 0;
abe529af
BP
4589}
4590
4591/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4592 * Returns it if found, otherwise a null pointer.
4593 *
2b459b83
BP
4594 * 'hash' must be the return value of flow_hash(flow, 0).
4595 *
abe529af
BP
4596 * The returned facet might need revalidation; use facet_lookup_valid()
4597 * instead if that is important. */
4598static struct facet *
2b459b83
BP
4599facet_find(struct ofproto_dpif *ofproto,
4600 const struct flow *flow, uint32_t hash)
abe529af
BP
4601{
4602 struct facet *facet;
4603
2b459b83 4604 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
abe529af
BP
4605 if (flow_equal(flow, &facet->flow)) {
4606 return facet;
4607 }
4608 }
4609
4610 return NULL;
4611}
4612
4613/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
4614 * Returns it if found, otherwise a null pointer.
4615 *
2b459b83
BP
4616 * 'hash' must be the return value of flow_hash(flow, 0).
4617 *
abe529af
BP
4618 * The returned facet is guaranteed to be valid. */
4619static struct facet *
2b459b83
BP
4620facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
4621 uint32_t hash)
abe529af 4622{
c57b2226 4623 struct facet *facet;
abe529af 4624
c57b2226 4625 facet = facet_find(ofproto, flow, hash);
abe529af 4626 if (facet
2cc3c58e
EJ
4627 && (ofproto->backer->need_revalidate
4628 || tag_set_intersects(&ofproto->backer->revalidate_set,
4629 facet->tags))) {
c57b2226 4630 facet_revalidate(facet);
f231418e
EJ
4631
4632 /* facet_revalidate() may have destroyed 'facet'. */
4633 facet = facet_find(ofproto, flow, hash);
abe529af
BP
4634 }
4635
4636 return facet;
4637}
4638
1a1e3a0a
JP
4639/* Returns a subfacet from 'facet'. A facet consists of one or more
4640 * subfacets, and this function returns one of them. */
4641static struct subfacet *facet_get_subfacet(struct facet *facet)
4642{
4643 return CONTAINER_OF(list_front(&facet->subfacets), struct subfacet,
4644 list_node);
4645}
4646
6a7e895f
BP
4647static const char *
4648subfacet_path_to_string(enum subfacet_path path)
4649{
4650 switch (path) {
4651 case SF_NOT_INSTALLED:
4652 return "not installed";
4653 case SF_FAST_PATH:
4654 return "in fast path";
4655 case SF_SLOW_PATH:
4656 return "in slow path";
4657 default:
4658 return "<error>";
4659 }
4660}
4661
4662/* Returns the path in which a subfacet should be installed if its 'slow'
4663 * member has the specified value. */
4664static enum subfacet_path
4665subfacet_want_path(enum slow_path_reason slow)
4666{
4667 return slow ? SF_SLOW_PATH : SF_FAST_PATH;
4668}
4669
4670/* Returns true if 'subfacet' needs to have its datapath flow updated,
4671 * supposing that its actions have been recalculated as 'want_actions' and that
4672 * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
4673static bool
4674subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
4675 const struct ofpbuf *want_actions)
4676{
4677 enum subfacet_path want_path = subfacet_want_path(slow);
4678 return (want_path != subfacet->path
4679 || (want_path == SF_FAST_PATH
4680 && (subfacet->actions_len != want_actions->size
4681 || memcmp(subfacet->actions, want_actions->data,
4682 subfacet->actions_len))));
4683}
4684
6814e51f
BP
4685static bool
4686facet_check_consistency(struct facet *facet)
4687{
4688 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
4689
4690 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4691
050ac423
BP
4692 uint64_t odp_actions_stub[1024 / 8];
4693 struct ofpbuf odp_actions;
4694
6814e51f
BP
4695 struct rule_dpif *rule;
4696 struct subfacet *subfacet;
c53e1132 4697 bool may_log = false;
6814e51f
BP
4698 bool ok;
4699
4700 /* Check the rule for consistency. */
c57b2226
BP
4701 rule = rule_dpif_lookup(ofproto, &facet->flow);
4702 ok = rule == facet->rule;
4703 if (!ok) {
c53e1132 4704 may_log = !VLOG_DROP_WARN(&rl);
c53e1132
BP
4705 if (may_log) {
4706 struct ds s;
6814e51f 4707
c53e1132
BP
4708 ds_init(&s);
4709 flow_format(&s, &facet->flow);
4710 ds_put_format(&s, ": facet associated with wrong rule (was "
4711 "table=%"PRIu8",", facet->rule->up.table_id);
4712 cls_rule_format(&facet->rule->up.cr, &s);
4713 ds_put_format(&s, ") (should have been table=%"PRIu8",",
4714 rule->up.table_id);
4715 cls_rule_format(&rule->up.cr, &s);
4716 ds_put_char(&s, ')');
6814e51f 4717
c53e1132
BP
4718 VLOG_WARN("%s", ds_cstr(&s));
4719 ds_destroy(&s);
4720 }
6814e51f
BP
4721 }
4722
4723 /* Check the datapath actions for consistency. */
050ac423 4724 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
6814e51f 4725 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 4726 enum subfacet_path want_path;
6814e51f 4727 struct action_xlate_ctx ctx;
9616614b 4728 struct ds s;
6814e51f
BP
4729
4730 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
14f94f9a 4731 &subfacet->initial_vals, rule, 0, NULL);
f25d0cf3 4732 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
050ac423 4733 &odp_actions);
6814e51f 4734
6a7e895f
BP
4735 if (subfacet->path == SF_NOT_INSTALLED) {
4736 /* This only happens if the datapath reported an error when we
4737 * tried to install the flow. Don't flag another error here. */
4738 continue;
4739 }
4740
4741 want_path = subfacet_want_path(subfacet->slow);
4742 if (want_path == SF_SLOW_PATH && subfacet->path == SF_SLOW_PATH) {
4743 /* The actions for slow-path flows may legitimately vary from one
4744 * packet to the next. We're done. */
050ac423 4745 continue;
6814e51f
BP
4746 }
4747
6a7e895f 4748 if (!subfacet_should_install(subfacet, subfacet->slow, &odp_actions)) {
9616614b
BP
4749 continue;
4750 }
c53e1132 4751
9616614b
BP
4752 /* Inconsistency! */
4753 if (ok) {
4754 may_log = !VLOG_DROP_WARN(&rl);
4755 ok = false;
4756 }
4757 if (!may_log) {
4758 /* Rate-limited, skip reporting. */
4759 continue;
4760 }
c53e1132 4761
9616614b 4762 ds_init(&s);
9566abf9 4763 odp_flow_key_format(subfacet->key, subfacet->key_len, &s);
9616614b
BP
4764
4765 ds_put_cstr(&s, ": inconsistency in subfacet");
6a7e895f 4766 if (want_path != subfacet->path) {
9616614b
BP
4767 enum odp_key_fitness fitness = subfacet->key_fitness;
4768
6a7e895f
BP
4769 ds_put_format(&s, " (%s, fitness=%s)",
4770 subfacet_path_to_string(subfacet->path),
9616614b 4771 odp_key_fitness_to_string(fitness));
6a7e895f
BP
4772 ds_put_format(&s, " (should have been %s)",
4773 subfacet_path_to_string(want_path));
4774 } else if (want_path == SF_FAST_PATH) {
9616614b
BP
4775 ds_put_cstr(&s, " (actions were: ");
4776 format_odp_actions(&s, subfacet->actions,
4777 subfacet->actions_len);
4778 ds_put_cstr(&s, ") (correct actions: ");
4779 format_odp_actions(&s, odp_actions.data, odp_actions.size);
4780 ds_put_char(&s, ')');
4781 } else {
4782 ds_put_cstr(&s, " (actions: ");
4783 format_odp_actions(&s, subfacet->actions,
4784 subfacet->actions_len);
4785 ds_put_char(&s, ')');
6814e51f 4786 }
9616614b
BP
4787 VLOG_WARN("%s", ds_cstr(&s));
4788 ds_destroy(&s);
6814e51f 4789 }
050ac423 4790 ofpbuf_uninit(&odp_actions);
6814e51f
BP
4791
4792 return ok;
4793}
4794
15baa734 4795/* Re-searches the classifier for 'facet':
abe529af
BP
4796 *
4797 * - If the rule found is different from 'facet''s current rule, moves
4798 * 'facet' to the new rule and recompiles its actions.
4799 *
4800 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
f231418e
EJ
4801 * where it is and recompiles its actions anyway.
4802 *
4803 * - If any of 'facet''s subfacets correspond to a new flow according to
4804 * ofproto_receive(), 'facet' is removed. */
c57b2226 4805static void
15baa734 4806facet_revalidate(struct facet *facet)
abe529af 4807{
15baa734 4808 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
b95fc6ba
BP
4809 struct actions {
4810 struct nlattr *odp_actions;
4811 size_t actions_len;
4812 };
4813 struct actions *new_actions;
4814
abe529af 4815 struct action_xlate_ctx ctx;
050ac423
BP
4816 uint64_t odp_actions_stub[1024 / 8];
4817 struct ofpbuf odp_actions;
4818
abe529af 4819 struct rule_dpif *new_rule;
b0f7b9b5 4820 struct subfacet *subfacet;
b95fc6ba 4821 int i;
abe529af
BP
4822
4823 COVERAGE_INC(facet_revalidate);
4824
f231418e
EJ
4825 /* Check that child subfacets still correspond to this facet. Tunnel
4826 * configuration changes could cause a subfacet's OpenFlow in_port to
4827 * change. */
4828 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
4829 struct ofproto_dpif *recv_ofproto;
4830 struct flow recv_flow;
4831 int error;
4832
4833 error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
4834 subfacet->key_len, &recv_flow, NULL,
4835 &recv_ofproto, NULL, NULL);
4836 if (error
4837 || recv_ofproto != ofproto
4838 || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
4839 facet_remove(facet);
4840 return;
4841 }
4842 }
4843
c57b2226 4844 new_rule = rule_dpif_lookup(ofproto, &facet->flow);
abe529af 4845
df2c07f4 4846 /* Calculate new datapath actions.
abe529af
BP
4847 *
4848 * We do not modify any 'facet' state yet, because we might need to, e.g.,
4849 * emit a NetFlow expiration and, if so, we need to have the old state
4850 * around to properly compose it. */
abe529af 4851
df2c07f4
JP
4852 /* If the datapath actions changed or the installability changed,
4853 * then we need to talk to the datapath. */
b95fc6ba
BP
4854 i = 0;
4855 new_actions = NULL;
4856 memset(&ctx, 0, sizeof ctx);
050ac423 4857 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
b0f7b9b5 4858 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 4859 enum slow_path_reason slow;
b95fc6ba 4860
e84173dc 4861 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
14f94f9a 4862 &subfacet->initial_vals, new_rule, 0, NULL);
f25d0cf3 4863 xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
050ac423 4864 &odp_actions);
b0f7b9b5 4865
6a7e895f
BP
4866 slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
4867 if (subfacet_should_install(subfacet, slow, &odp_actions)) {
4868 struct dpif_flow_stats stats;
4869
4870 subfacet_install(subfacet,
4871 odp_actions.data, odp_actions.size, &stats, slow);
4872 subfacet_update_stats(subfacet, &stats);
b95fc6ba
BP
4873
4874 if (!new_actions) {
4875 new_actions = xcalloc(list_size(&facet->subfacets),
4876 sizeof *new_actions);
4877 }
050ac423
BP
4878 new_actions[i].odp_actions = xmemdup(odp_actions.data,
4879 odp_actions.size);
4880 new_actions[i].actions_len = odp_actions.size;
abe529af 4881 }
b95fc6ba 4882
b95fc6ba 4883 i++;
b0f7b9b5 4884 }
050ac423
BP
4885 ofpbuf_uninit(&odp_actions);
4886
b95fc6ba 4887 if (new_actions) {
15baa734 4888 facet_flush_stats(facet);
abe529af
BP
4889 }
4890
4891 /* Update 'facet' now that we've taken care of all the old state. */
4892 facet->tags = ctx.tags;
4893 facet->nf_flow.output_iface = ctx.nf_output_iface;
75a75043
BP
4894 facet->has_learn = ctx.has_learn;
4895 facet->has_normal = ctx.has_normal;
0e553d9c 4896 facet->has_fin_timeout = ctx.has_fin_timeout;
9d24de3b 4897 facet->mirrors = ctx.mirrors;
6a7e895f
BP
4898
4899 i = 0;
4900 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
4901 subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
4902
4903 if (new_actions && new_actions[i].odp_actions) {
4904 free(subfacet->actions);
4905 subfacet->actions = new_actions[i].odp_actions;
4906 subfacet->actions_len = new_actions[i].actions_len;
b95fc6ba 4907 }
6a7e895f 4908 i++;
abe529af 4909 }
6a7e895f
BP
4910 free(new_actions);
4911
abe529af
BP
4912 if (facet->rule != new_rule) {
4913 COVERAGE_INC(facet_changed_rule);
4914 list_remove(&facet->list_node);
4915 list_push_back(&new_rule->facets, &facet->list_node);
4916 facet->rule = new_rule;
4917 facet->used = new_rule->up.created;
9d24de3b 4918 facet->prev_used = facet->used;
abe529af 4919 }
abe529af
BP
4920}
4921
4922/* Updates 'facet''s used time. Caller is responsible for calling
4923 * facet_push_stats() to update the flows which 'facet' resubmits into. */
4924static void
15baa734 4925facet_update_time(struct facet *facet, long long int used)
abe529af 4926{
15baa734 4927 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
abe529af
BP
4928 if (used > facet->used) {
4929 facet->used = used;
1745cd08 4930 ofproto_rule_update_used(&facet->rule->up, used);
abe529af
BP
4931 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
4932 }
4933}
4934
bbb5d219
EJ
4935static void
4936facet_reset_counters(struct facet *facet)
4937{
4938 facet->packet_count = 0;
4939 facet->byte_count = 0;
9d24de3b
JP
4940 facet->prev_packet_count = 0;
4941 facet->prev_byte_count = 0;
bbb5d219
EJ
4942 facet->accounted_bytes = 0;
4943}
4944
abe529af
BP
4945static void
4946facet_push_stats(struct facet *facet)
4947{
112bc5f4 4948 struct dpif_flow_stats stats;
abe529af 4949
cb22974d
BP
4950 ovs_assert(facet->packet_count >= facet->prev_packet_count);
4951 ovs_assert(facet->byte_count >= facet->prev_byte_count);
4952 ovs_assert(facet->used >= facet->prev_used);
abe529af 4953
112bc5f4
BP
4954 stats.n_packets = facet->packet_count - facet->prev_packet_count;
4955 stats.n_bytes = facet->byte_count - facet->prev_byte_count;
4956 stats.used = facet->used;
4957 stats.tcp_flags = 0;
abe529af 4958
112bc5f4 4959 if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
9d24de3b
JP
4960 facet->prev_packet_count = facet->packet_count;
4961 facet->prev_byte_count = facet->byte_count;
4962 facet->prev_used = facet->used;
abe529af 4963
ac35f9c8 4964 flow_push_stats(facet, &stats);
9d24de3b
JP
4965
4966 update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
112bc5f4 4967 facet->mirrors, stats.n_packets, stats.n_bytes);
abe529af
BP
4968 }
4969}
4970
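/* A worked example of the delta accounting above (an illustrative sketch,
 * not part of the build; the counter values are hypothetical): */
#if 0
facet->packet_count = 12;           /* Total packets ever seen. */
facet->prev_packet_count = 9;       /* Already pushed earlier. */
facet_push_stats(facet);            /* Pushes only the 3-packet delta to the
                                     * rule and mirrors, then advances
                                     * prev_packet_count to 12 so the same
                                     * traffic is never credited twice. */
#endif
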
abe529af 4971static void
112bc5f4 4972rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
abe529af 4973{
112bc5f4
BP
4974 rule->packet_count += stats->n_packets;
4975 rule->byte_count += stats->n_bytes;
4976 ofproto_rule_update_used(&rule->up, stats->used);
abe529af
BP
4977}
4978
ac35f9c8
JP
4979/* Pushes flow statistics to the rules into which 'facet->flow' resubmits,
4980 * as given by 'facet->rule''s actions, and to its mirrors. */
abe529af 4981static void
ac35f9c8 4982flow_push_stats(struct facet *facet, const struct dpif_flow_stats *stats)
abe529af 4983{
ac35f9c8 4984 struct rule_dpif *rule = facet->rule;
abe529af 4985 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
1a1e3a0a 4986 struct subfacet *subfacet = facet_get_subfacet(facet);
112bc5f4 4987 struct action_xlate_ctx ctx;
abe529af 4988
112bc5f4 4989 ofproto_rule_update_used(&rule->up, stats->used);
f3b50afb 4990
14f94f9a
JP
4991 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
4992 &subfacet->initial_vals, rule, 0, NULL);
112bc5f4 4993 ctx.resubmit_stats = stats;
f25d0cf3
BP
4994 xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
4995 rule->up.ofpacts_len);
abe529af
BP
4996}
4997\f
b0f7b9b5
BP
4998/* Subfacets. */
4999
5000static struct subfacet *
acf60855 5001subfacet_find(struct ofproto_dpif *ofproto,
9566abf9 5002 const struct nlattr *key, size_t key_len, uint32_t key_hash)
b0f7b9b5
BP
5003{
5004 struct subfacet *subfacet;
5005
5006 HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
5007 &ofproto->subfacets) {
9566abf9
EJ
5008 if (subfacet->key_len == key_len
5009 && !memcmp(key, subfacet->key, key_len)) {
b0f7b9b5
BP
5010 return subfacet;
5011 }
5012 }
5013
5014 return NULL;
5015}
5016
5017/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
a088a1ff
JP
5018 * 'key_fitness', 'key', and 'key_len' members in 'miss'. Returns the
5019 * existing subfacet if there is one, otherwise creates and returns a
5020 * new subfacet.
b95fc6ba
BP
5021 *
5022 * If the returned subfacet is new, then subfacet->actions will be NULL, in
5023 * which case the caller must populate the actions with
5024 * subfacet_make_actions(). */
b0f7b9b5 5025static struct subfacet *
a088a1ff
JP
5026subfacet_create(struct facet *facet, struct flow_miss *miss,
5027 long long int now)
b0f7b9b5 5028{
15baa734 5029 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
a088a1ff
JP
5030 enum odp_key_fitness key_fitness = miss->key_fitness;
5031 const struct nlattr *key = miss->key;
5032 size_t key_len = miss->key_len;
5033 uint32_t key_hash;
b0f7b9b5
BP
5034 struct subfacet *subfacet;
5035
a088a1ff
JP
5036 key_hash = odp_flow_key_hash(key, key_len);
5037
3b145dd7
BP
5038 if (list_is_empty(&facet->subfacets)) {
5039 subfacet = &facet->one_subfacet;
5040 } else {
9566abf9 5041 subfacet = subfacet_find(ofproto, key, key_len, key_hash);
3b145dd7
BP
5042 if (subfacet) {
5043 if (subfacet->facet == facet) {
5044 return subfacet;
5045 }
5046
5047 /* This shouldn't happen. */
5048 VLOG_ERR_RL(&rl, "subfacet with wrong facet");
5049 subfacet_destroy(subfacet);
b0f7b9b5
BP
5050 }
5051
3b145dd7 5052 subfacet = xmalloc(sizeof *subfacet);
b0f7b9b5
BP
5053 }
5054
b0f7b9b5
BP
5055 hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
5056 list_push_back(&facet->subfacets, &subfacet->list_node);
5057 subfacet->facet = facet;
b0f7b9b5 5058 subfacet->key_fitness = key_fitness;
9566abf9
EJ
5059 subfacet->key = xmemdup(key, key_len);
5060 subfacet->key_len = key_len;
459b16a1 5061 subfacet->used = now;
26cd7e34
BP
5062 subfacet->dp_packet_count = 0;
5063 subfacet->dp_byte_count = 0;
5064 subfacet->actions_len = 0;
5065 subfacet->actions = NULL;
6a7e895f
BP
5066 subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
5067 ? SLOW_MATCH
5068 : 0);
5069 subfacet->path = SF_NOT_INSTALLED;
14f94f9a 5070 subfacet->initial_vals = miss->initial_vals;
a088a1ff 5071 subfacet->odp_in_port = miss->odp_in_port;
b0f7b9b5
BP
5072
5073 return subfacet;
5074}
5075
b0f7b9b5
BP
5076/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
5077 * its facet within 'ofproto', and frees it. */
5078static void
15baa734 5079subfacet_destroy__(struct subfacet *subfacet)
b0f7b9b5 5080{
15baa734
BP
5081 struct facet *facet = subfacet->facet;
5082 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
5083
5084 subfacet_uninstall(subfacet);
b0f7b9b5
BP
5085 hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
5086 list_remove(&subfacet->list_node);
5087 free(subfacet->key);
b95fc6ba 5088 free(subfacet->actions);
26cd7e34
BP
5089 if (subfacet != &facet->one_subfacet) {
5090 free(subfacet);
5091 }
b0f7b9b5
BP
5092}
5093
5094/* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
5095 * last remaining subfacet in its facet destroys the facet too. */
5096static void
15baa734 5097subfacet_destroy(struct subfacet *subfacet)
b0f7b9b5
BP
5098{
5099 struct facet *facet = subfacet->facet;
5100
551a2f6c
BP
5101 if (list_is_singleton(&facet->subfacets)) {
5102 /* facet_remove() needs at least one subfacet (it will remove it). */
15baa734 5103 facet_remove(facet);
551a2f6c 5104 } else {
15baa734 5105 subfacet_destroy__(subfacet);
b0f7b9b5
BP
5106 }
5107}
5108
1d85f9e5
JP
5109static void
5110subfacet_destroy_batch(struct ofproto_dpif *ofproto,
5111 struct subfacet **subfacets, int n)
5112{
1d85f9e5
JP
5113 struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
5114 struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
1d85f9e5
JP
5115 struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
5116 int i;
5117
5118 for (i = 0; i < n; i++) {
5119 ops[i].type = DPIF_OP_FLOW_DEL;
9566abf9
EJ
5120 ops[i].u.flow_del.key = subfacets[i]->key;
5121 ops[i].u.flow_del.key_len = subfacets[i]->key_len;
1d85f9e5
JP
5122 ops[i].u.flow_del.stats = &stats[i];
5123 opsp[i] = &ops[i];
5124 }
5125
acf60855 5126 dpif_operate(ofproto->backer->dpif, opsp, n);
1d85f9e5
JP
5127 for (i = 0; i < n; i++) {
5128 subfacet_reset_dp_stats(subfacets[i], &stats[i]);
5129 subfacets[i]->path = SF_NOT_INSTALLED;
5130 subfacet_destroy(subfacets[i]);
5131 }
5132}
5133
5fe20d5d
BP
5134/* Composes the datapath actions for 'subfacet' based on its rule's actions.
5135 * Translates the actions into 'odp_actions', which the caller must have
5136 * initialized and is responsible for uninitializing. */
b95fc6ba 5137static void
5fe20d5d
BP
5138subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
5139 struct ofpbuf *odp_actions)
b95fc6ba
BP
5140{
5141 struct facet *facet = subfacet->facet;
18b2a258 5142 struct rule_dpif *rule = facet->rule;
15baa734 5143 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
050ac423 5144
b95fc6ba
BP
5145 struct action_xlate_ctx ctx;
5146
14f94f9a
JP
5147 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
5148 &subfacet->initial_vals, rule, 0, packet);
f25d0cf3 5149 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
b95fc6ba 5150 facet->tags = ctx.tags;
b95fc6ba
BP
5151 facet->has_learn = ctx.has_learn;
5152 facet->has_normal = ctx.has_normal;
0e553d9c 5153 facet->has_fin_timeout = ctx.has_fin_timeout;
b95fc6ba 5154 facet->nf_flow.output_iface = ctx.nf_output_iface;
9d24de3b 5155 facet->mirrors = ctx.mirrors;
b95fc6ba 5156
6a7e895f 5157 subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
5fe20d5d
BP
5158 if (subfacet->actions_len != odp_actions->size
5159 || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
b95fc6ba 5160 free(subfacet->actions);
5fe20d5d
BP
5161 subfacet->actions_len = odp_actions->size;
5162 subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
b95fc6ba 5163 }
b95fc6ba
BP
5164}
5165
b0f7b9b5
BP
5166/* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
5167 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
5168 * in the datapath will be zeroed and 'stats' will be updated with traffic new
5169 * since 'subfacet' was last updated.
5170 *
5171 * Returns 0 if successful, otherwise a positive errno value. */
5172static int
15baa734 5173subfacet_install(struct subfacet *subfacet,
b0f7b9b5 5174 const struct nlattr *actions, size_t actions_len,
6a7e895f
BP
5175 struct dpif_flow_stats *stats,
5176 enum slow_path_reason slow)
b0f7b9b5 5177{
15baa734
BP
5178 struct facet *facet = subfacet->facet;
5179 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
6a7e895f
BP
5180 enum subfacet_path path = subfacet_want_path(slow);
5181 uint64_t slow_path_stub[128 / 8];
b0f7b9b5 5182 enum dpif_flow_put_flags flags;
b0f7b9b5
BP
5183 int ret;
5184
5185 flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
5186 if (stats) {
5187 flags |= DPIF_FP_ZERO_STATS;
5188 }
5189
6a7e895f
BP
5190 if (path == SF_SLOW_PATH) {
5191 compose_slow_path(ofproto, &facet->flow, slow,
5192 slow_path_stub, sizeof slow_path_stub,
5193 &actions, &actions_len);
5194 }
5195
9566abf9
EJ
5196 ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
5197 subfacet->key_len, actions, actions_len, stats);
b0f7b9b5
BP
5198
5199 if (stats) {
5200 subfacet_reset_dp_stats(subfacet, stats);
5201 }
5202
6a7e895f
BP
5203 if (!ret) {
5204 subfacet->path = path;
5205 }
b0f7b9b5
BP
5206 return ret;
5207}
5208
6a7e895f
BP
5209static int
5210subfacet_reinstall(struct subfacet *subfacet, struct dpif_flow_stats *stats)
5211{
5212 return subfacet_install(subfacet, subfacet->actions, subfacet->actions_len,
5213 stats, subfacet->slow);
5214}
5215
b0f7b9b5
BP
5216/* If 'subfacet' is installed in the datapath, uninstalls it. */
5217static void
15baa734 5218subfacet_uninstall(struct subfacet *subfacet)
b0f7b9b5 5219{
6a7e895f 5220 if (subfacet->path != SF_NOT_INSTALLED) {
15baa734
BP
5221 struct rule_dpif *rule = subfacet->facet->rule;
5222 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
b0f7b9b5 5223 struct dpif_flow_stats stats;
b0f7b9b5
BP
5224 int error;
5225
9566abf9
EJ
5226 error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
5227 subfacet->key_len, &stats);
b0f7b9b5
BP
5228 subfacet_reset_dp_stats(subfacet, &stats);
5229 if (!error) {
15baa734 5230 subfacet_update_stats(subfacet, &stats);
b0f7b9b5 5231 }
6a7e895f 5232 subfacet->path = SF_NOT_INSTALLED;
b0f7b9b5 5233 } else {
cb22974d
BP
5234 ovs_assert(subfacet->dp_packet_count == 0);
5235 ovs_assert(subfacet->dp_byte_count == 0);
b0f7b9b5
BP
5236 }
5237}
5238
5239/* Resets 'subfacet''s datapath statistics counters. This should be called
5240 * when 'subfacet''s statistics are cleared in the datapath. If 'stats' is
5241 * non-null, it should contain the statistics returned by dpif when 'subfacet'
5242 * was reset in the datapath. 'stats' will be modified to include only
5243 * statistics new since 'subfacet' was last updated. */
5244static void
5245subfacet_reset_dp_stats(struct subfacet *subfacet,
5246 struct dpif_flow_stats *stats)
5247{
5248 if (stats
5249 && subfacet->dp_packet_count <= stats->n_packets
5250 && subfacet->dp_byte_count <= stats->n_bytes) {
5251 stats->n_packets -= subfacet->dp_packet_count;
5252 stats->n_bytes -= subfacet->dp_byte_count;
5253 }
5254
5255 subfacet->dp_packet_count = 0;
5256 subfacet->dp_byte_count = 0;
5257}
5258
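/* A worked example of the adjustment above (an illustrative sketch, not part
 * of the build; the counts are hypothetical): */
#if 0
struct dpif_flow_stats stats;

stats.n_packets = 10;               /* The datapath reported 10 packets... */
stats.n_bytes = 1000;
subfacet->dp_packet_count = 7;      /* ...of which 7 were already folded in. */
subfacet->dp_byte_count = 700;
subfacet_reset_dp_stats(subfacet, &stats);
/* Now stats.n_packets == 3 and stats.n_bytes == 300: only the traffic that is
 * new since 'subfacet' was last updated.  Both dp_* counters are zeroed. */
#endif
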
5259/* Updates 'subfacet''s used time. The caller is responsible for calling
5260 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
5261static void
15baa734 5262subfacet_update_time(struct subfacet *subfacet, long long int used)
b0f7b9b5
BP
5263{
5264 if (used > subfacet->used) {
5265 subfacet->used = used;
15baa734 5266 facet_update_time(subfacet->facet, used);
b0f7b9b5
BP
5267 }
5268}
5269
5270/* Folds the statistics from 'stats' into the counters in 'subfacet'.
5271 *
5272 * Because of the meaning of a subfacet's counters, it only makes sense to do
5273 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
5274 * represents a packet that was sent by hand or if it represents statistics
5275 * that have been cleared out of the datapath. */
5276static void
15baa734 5277subfacet_update_stats(struct subfacet *subfacet,
b0f7b9b5
BP
5278 const struct dpif_flow_stats *stats)
5279{
5280 if (stats->n_packets || stats->used > subfacet->used) {
5281 struct facet *facet = subfacet->facet;
5282
15baa734 5283 subfacet_update_time(subfacet, stats->used);
b0f7b9b5
BP
5284 facet->packet_count += stats->n_packets;
5285 facet->byte_count += stats->n_bytes;
0e553d9c 5286 facet->tcp_flags |= stats->tcp_flags;
b0f7b9b5
BP
5287 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
5288 }
5289}
5290\f
abe529af
BP
5291/* Rules. */
5292
5293static struct rule_dpif *
c57b2226
BP
5294rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
5295{
c57b2226
BP
5296 struct rule_dpif *rule;
5297
5298 rule = rule_dpif_lookup__(ofproto, flow, 0);
5299 if (rule) {
5300 return rule;
5301 }
5302
c376f9a3 5303 return rule_dpif_miss_rule(ofproto, flow);
c57b2226
BP
5304}
5305
5306static struct rule_dpif *
5307rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
5308 uint8_t table_id)
abe529af 5309{
7257b535
BP
5310 struct cls_rule *cls_rule;
5311 struct classifier *cls;
5312
9cdaaebe
BP
5313 if (table_id >= N_TABLES) {
5314 return NULL;
5315 }
5316
d0918789 5317 cls = &ofproto->up.tables[table_id].cls;
eadef313 5318 if (flow->nw_frag & FLOW_NW_FRAG_ANY
7257b535
BP
5319 && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
5320 /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
5321 * are unavailable. */
5322 struct flow ofpc_normal_flow = *flow;
5323 ofpc_normal_flow.tp_src = htons(0);
5324 ofpc_normal_flow.tp_dst = htons(0);
5325 cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
5326 } else {
5327 cls_rule = classifier_lookup(cls, flow);
5328 }
5329 return rule_dpif_cast(rule_from_cls_rule(cls_rule));
abe529af
BP
5330}
5331
c376f9a3
IY
5332static struct rule_dpif *
5333rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
5334{
5335 struct ofport_dpif *port;
5336
5337 port = get_ofp_port(ofproto, flow->in_port);
5338 if (!port) {
5339 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
5340 return ofproto->miss_rule;
5341 }
5342
5343 if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
5344 return ofproto->no_packet_in_rule;
5345 }
5346 return ofproto->miss_rule;
5347}
5348
7ee20df1
BP
5349static void
5350complete_operation(struct rule_dpif *rule)
5351{
5352 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
5353
54a9cbc9 5354 rule_invalidate(rule);
7ee20df1
BP
5355 if (clogged) {
5356 struct dpif_completion *c = xmalloc(sizeof *c);
5357 c->op = rule->up.pending;
5358 list_push_back(&ofproto->completions, &c->list_node);
5359 } else {
5360 ofoperation_complete(rule->up.pending, 0);
5361 }
5362}
5363
abe529af
BP
5364static struct rule *
5365rule_alloc(void)
5366{
5367 struct rule_dpif *rule = xmalloc(sizeof *rule);
5368 return &rule->up;
5369}
5370
5371static void
5372rule_dealloc(struct rule *rule_)
5373{
5374 struct rule_dpif *rule = rule_dpif_cast(rule_);
5375 free(rule);
5376}
5377
90bf1e07 5378static enum ofperr
abe529af
BP
5379rule_construct(struct rule *rule_)
5380{
5381 struct rule_dpif *rule = rule_dpif_cast(rule_);
5382 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
7ee20df1 5383 struct rule_dpif *victim;
54a9cbc9 5384 uint8_t table_id;
abe529af 5385
abe529af
BP
5386 rule->packet_count = 0;
5387 rule->byte_count = 0;
abe529af 5388
7ee20df1
BP
5389 victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
5390 if (victim && !list_is_empty(&victim->facets)) {
5391 struct facet *facet;
5392
5393 rule->facets = victim->facets;
5394 list_moved(&rule->facets);
5395 LIST_FOR_EACH (facet, list_node, &rule->facets) {
bbb5d219
EJ
5396 /* XXX: We're only clearing our local counters here. It's possible
5397 * that quite a few packets are unaccounted for in the datapath
5398 * statistics. These will be accounted to the new rule instead of
5399 * cleared as required. This could be fixed by clearing out the
5400 * datapath statistics for this facet, but currently it doesn't
5401 * seem worth it. */
5402 facet_reset_counters(facet);
7ee20df1
BP
5403 facet->rule = rule;
5404 }
5405 } else {
5406 /* Must avoid list_moved() in this case. */
5407 list_init(&rule->facets);
5408 }
abe529af 5409
54a9cbc9 5410 table_id = rule->up.table_id;
5cb7a798
BP
5411 if (victim) {
5412 rule->tag = victim->tag;
5413 } else if (table_id == 0) {
5414 rule->tag = 0;
5415 } else {
5416 struct flow flow;
5417
5418 miniflow_expand(&rule->up.cr.match.flow, &flow);
5419 rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
5420 ofproto->tables[table_id].basis);
5421 }
54a9cbc9 5422
7ee20df1 5423 complete_operation(rule);
abe529af
BP
5424 return 0;
5425}
5426
5427static void
5428rule_destruct(struct rule *rule_)
5429{
5430 struct rule_dpif *rule = rule_dpif_cast(rule_);
abe529af
BP
5431 struct facet *facet, *next_facet;
5432
abe529af 5433 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
15baa734 5434 facet_revalidate(facet);
abe529af 5435 }
7ee20df1
BP
5436
5437 complete_operation(rule);
abe529af
BP
5438}
5439
5440static void
5441rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
5442{
bf1e8ff9 5443 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule_->ofproto);
abe529af
BP
5444 struct rule_dpif *rule = rule_dpif_cast(rule_);
5445 struct facet *facet;
5446
bf1e8ff9
EJ
5447 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
5448 facet_push_stats(facet);
5449 }
5450
abe529af
BP
5451 /* Start from historical data for 'rule' itself that are no longer tracked
5452 * in facets. This counts, for example, facets that have expired. */
5453 *packets = rule->packet_count;
5454 *bytes = rule->byte_count;
5455
5456 /* Add any statistics that are tracked by facets. This includes
5457 * statistical data recently updated by ofproto_update_stats() as well as
5458 * stats for packets that were executed "by hand" via dpif_execute(). */
5459 LIST_FOR_EACH (facet, list_node, &rule->facets) {
5460 *packets += facet->packet_count;
5461 *bytes += facet->byte_count;
5462 }
5463}
5464
0a740f48
EJ
5465static void
5466rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
5467 struct ofpbuf *packet)
abe529af 5468{
abe529af 5469 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
14f94f9a 5470 struct initial_vals initial_vals;
112bc5f4 5471 struct dpif_flow_stats stats;
abe529af 5472 struct action_xlate_ctx ctx;
050ac423
BP
5473 uint64_t odp_actions_stub[1024 / 8];
5474 struct ofpbuf odp_actions;
abe529af 5475
a7752d4a 5476 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
112bc5f4
BP
5477 rule_credit_stats(rule, &stats);
5478
14f94f9a 5479 initial_vals.vlan_tci = flow->vlan_tci;
c3f6c502 5480 initial_vals.tunnel_ip_tos = flow->tunnel.ip_tos;
050ac423 5481 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
14f94f9a 5482 action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals,
112bc5f4
BP
5483 rule, stats.tcp_flags, packet);
5484 ctx.resubmit_stats = &stats;
f25d0cf3 5485 xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
112bc5f4
BP
5486
5487 execute_odp_actions(ofproto, flow, odp_actions.data,
5488 odp_actions.size, packet);
5489
050ac423 5490 ofpbuf_uninit(&odp_actions);
0a740f48 5491}
5bf0e941 5492
0a740f48
EJ
5493static enum ofperr
5494rule_execute(struct rule *rule, const struct flow *flow,
5495 struct ofpbuf *packet)
5496{
5497 rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
5498 ofpbuf_delete(packet);
5bf0e941 5499 return 0;
abe529af
BP
5500}
5501
7ee20df1
BP
5502static void
5503rule_modify_actions(struct rule *rule_)
abe529af
BP
5504{
5505 struct rule_dpif *rule = rule_dpif_cast(rule_);
7ee20df1
BP
5506
5507 complete_operation(rule);
abe529af
BP
5508}
5509\f
97d6520b 5510/* Sends 'packet' out 'ofport'.
52a90c29 5511 * May modify 'packet'.
abe529af
BP
5512 * Returns 0 if successful, otherwise a positive errno value. */
5513static int
52a90c29 5514send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
abe529af 5515{
97d6520b 5516 const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
b9ad7294 5517 uint64_t odp_actions_stub[1024 / 8];
80e5eed9
BP
5518 struct ofpbuf key, odp_actions;
5519 struct odputil_keybuf keybuf;
9b56fe13 5520 uint32_t odp_port;
80e5eed9 5521 struct flow flow;
abe529af
BP
5522 int error;
5523
72e8bf28 5524 flow_extract(packet, 0, 0, NULL, OFPP_LOCAL, &flow);
0a740f48
EJ
5525 if (netdev_vport_is_patch(ofport->up.netdev)) {
5526 struct ofproto_dpif *peer_ofproto;
5527 struct dpif_flow_stats stats;
5528 struct ofport_dpif *peer;
5529 struct rule_dpif *rule;
5530
5531 peer = ofport_get_peer(ofport);
5532 if (!peer) {
5533 return ENODEV;
5534 }
5535
5536 dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
b9ad7294
EJ
5537 netdev_vport_inc_tx(ofport->up.netdev, &stats);
5538 netdev_vport_inc_rx(peer->up.netdev, &stats);
0a740f48
EJ
5539
5540 flow.in_port = peer->up.ofp_port;
5541 peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
5542 rule = rule_dpif_lookup(peer_ofproto, &flow);
5543 rule_dpif_execute(rule, &flow, packet);
5544
5545 return 0;
5546 }
5547
b9ad7294
EJ
5548 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
5549
5550 if (ofport->tnl_port) {
5551 struct dpif_flow_stats stats;
5552
5553 odp_port = tnl_port_send(ofport->tnl_port, &flow);
5554 if (odp_port == OVSP_NONE) {
5555 return ENODEV;
5556 }
5557
5558 dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
5559 netdev_vport_inc_tx(ofport->up.netdev, &stats);
5560 odp_put_tunnel_action(&flow.tunnel, &odp_actions);
09a0d2e7 5561 odp_put_skb_mark_action(flow.skb_mark, &odp_actions);
b9ad7294
EJ
5562 } else {
5563 odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
5564 flow.vlan_tci);
5565 if (odp_port != ofport->odp_port) {
5566 eth_pop_vlan(packet);
5567 flow.vlan_tci = htons(0);
5568 }
52a90c29
BP
5569 }
5570
80e5eed9 5571 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
5572 odp_flow_key_from_flow(&key, &flow,
5573 ofp_port_to_odp_port(ofproto, flow.in_port));
80e5eed9 5574
6ff686f2
PS
5575 compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
5576
df2c07f4 5577 nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
acf60855 5578 error = dpif_execute(ofproto->backer->dpif,
80e5eed9
BP
5579 key.data, key.size,
5580 odp_actions.data, odp_actions.size,
abe529af
BP
5581 packet);
5582 ofpbuf_uninit(&odp_actions);
5583
5584 if (error) {
5585 VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
5586 ofproto->up.name, odp_port, strerror(error));
5587 }
6527c598 5588 ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
abe529af
BP
5589 return error;
5590}
5591\f
df2c07f4 5592/* OpenFlow to datapath action translation. */
abe529af 5593
ffaef958 5594static bool may_receive(const struct ofport_dpif *, struct action_xlate_ctx *);
f25d0cf3
BP
5595static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
5596 struct action_xlate_ctx *);
4cd78906 5597static void xlate_normal(struct action_xlate_ctx *);
abe529af 5598
6a7e895f
BP
5599/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
5600 * The action will state 'slow' as the reason that the action is in the slow
5601 * path. (This is purely informational: it allows a human viewing "ovs-dpctl
5602 * dump-flows" output to see why a flow is in the slow path.)
5603 *
5604 * The 'stub_size' bytes in 'stub' will be used to store the action.
5605 * 'stub_size' must be large enough for the action.
5606 *
5607 * The action and its size will be stored in '*actionsp' and '*actions_lenp',
5608 * respectively. */
5609static void
5610compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
5611 enum slow_path_reason slow,
5612 uint64_t *stub, size_t stub_size,
5613 const struct nlattr **actionsp, size_t *actions_lenp)
5614{
5615 union user_action_cookie cookie;
5616 struct ofpbuf buf;
5617
5618 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
5619 cookie.slow_path.unused = 0;
5620 cookie.slow_path.reason = slow;
5621
5622 ofpbuf_use_stack(&buf, stub, stub_size);
625b0720 5623 if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
9032f11e 5624 uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
e995e3df 5625 odp_put_userspace_action(pid, &cookie, sizeof cookie, &buf);
625b0720
BP
5626 } else {
5627 put_userspace_action(ofproto, &buf, flow, &cookie);
5628 }
6a7e895f
BP
5629 *actionsp = buf.data;
5630 *actions_lenp = buf.size;
5631}
5632
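/* A typical call, mirroring the use in subfacet_install() above (an
 * illustrative sketch, not part of the build): */
#if 0
uint64_t slow_path_stub[128 / 8];
const struct nlattr *actions;
size_t actions_len;

compose_slow_path(ofproto, &facet->flow, SLOW_CONTROLLER,
                  slow_path_stub, sizeof slow_path_stub,
                  &actions, &actions_len);
/* 'actions' now points into 'slow_path_stub' and can be handed straight to,
 * e.g., dpif_flow_put(). */
#endif
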
98403001
BP
5633static size_t
5634put_userspace_action(const struct ofproto_dpif *ofproto,
5635 struct ofpbuf *odp_actions,
5636 const struct flow *flow,
1673e0e4 5637 const union user_action_cookie *cookie)
98403001 5638{
98403001
BP
5639 uint32_t pid;
5640
acf60855 5641 pid = dpif_port_get_pid(ofproto->backer->dpif,
e1b1d06a 5642 ofp_port_to_odp_port(ofproto, flow->in_port));
98403001 5643
e995e3df 5644 return odp_put_userspace_action(pid, cookie, sizeof *cookie, odp_actions);
98403001
BP
5645}
5646
36fc5f18
BP
5647static void
5648compose_sflow_cookie(const struct ofproto_dpif *ofproto,
5649 ovs_be16 vlan_tci, uint32_t odp_port,
1673e0e4 5650 unsigned int n_outputs, union user_action_cookie *cookie)
36fc5f18
BP
5651{
5652 int ifindex;
5653
5654 cookie->type = USER_ACTION_COOKIE_SFLOW;
1673e0e4 5655 cookie->sflow.vlan_tci = vlan_tci;
36fc5f18
BP
5656
5657 /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
5658 * port information") for the interpretation of cookie->output. */
5659 switch (n_outputs) {
5660 case 0:
5661 /* 0x40000000 | 256 means "packet dropped for unknown reason". */
1673e0e4 5662 cookie->sflow.output = 0x40000000 | 256;
36fc5f18
BP
5663 break;
5664
5665 case 1:
5666 ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
5667 if (ifindex) {
1673e0e4 5668 cookie->sflow.output = ifindex;
36fc5f18
BP
5669 break;
5670 }
5671 /* Fall through. */
5672 default:
 5673 /* 0x80000000 means "multiple output ports". */
1673e0e4 5674 cookie->sflow.output = 0x80000000 | n_outputs;
36fc5f18
BP
5675 break;
5676 }
5677}
5678
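/* How a consumer might decode the encoding described above (an illustrative
 * sketch, not part of the build; sflow_output_decode() is hypothetical): */
#if 0
static void
sflow_output_decode(uint32_t output)
{
    if (output == (0x40000000 | 256)) {
        /* Packet was dropped for an unknown reason. */
    } else if (output & 0x80000000) {
        unsigned int n_outputs = output & ~0x80000000; /* Multiple ports. */
    } else {
        int ifindex = output;       /* Single output port, by ifindex. */
    }
}
#endif
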
6ff686f2
PS
5679/* Compose SAMPLE action for sFlow. */
5680static size_t
5681compose_sflow_action(const struct ofproto_dpif *ofproto,
5682 struct ofpbuf *odp_actions,
5683 const struct flow *flow,
5684 uint32_t odp_port)
5685{
6ff686f2 5686 uint32_t probability;
1673e0e4 5687 union user_action_cookie cookie;
6ff686f2 5688 size_t sample_offset, actions_offset;
36fc5f18 5689 int cookie_offset;
6ff686f2
PS
5690
5691 if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
5692 return 0;
5693 }
5694
6ff686f2
PS
5695 sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
5696
5697 /* Number of packets out of UINT_MAX to sample. */
5698 probability = dpif_sflow_get_probability(ofproto->sflow);
5699 nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
5700
5701 actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
36fc5f18
BP
5702 compose_sflow_cookie(ofproto, htons(0), odp_port,
5703 odp_port == OVSP_NONE ? 0 : 1, &cookie);
98403001 5704 cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
6ff686f2
PS
5705
5706 nl_msg_end_nested(odp_actions, actions_offset);
5707 nl_msg_end_nested(odp_actions, sample_offset);
98403001 5708 return cookie_offset;
6ff686f2
PS
5709}
5710
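/* The probability below is expressed as a fraction of UINT_MAX, so (an
 * illustrative example, assuming a collector asked for 1-in-1000 sampling):
 *
 *     probability = UINT_MAX / 1000;
 *
 * means that roughly one packet in a thousand enters the nested actions. */
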
5711/* The SAMPLE action must be the first action in any given list of actions.
5712 * At this point we do not yet have all of the information required to build
5713 * it, so we build the SAMPLE action as completely as possible here. */
5714static void
5715add_sflow_action(struct action_xlate_ctx *ctx)
5716{
5717 ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
5718 ctx->odp_actions,
5719 &ctx->flow, OVSP_NONE);
5720 ctx->sflow_odp_port = 0;
5721 ctx->sflow_n_outputs = 0;
5722}
5723
5724/* Fixes the SAMPLE action according to data collected while composing ODP
5725 * actions.  We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS
5726 * attribute, i.e. the nested USERSPACE action's user-cookie required by sFlow. */
5727static void
5728fix_sflow_action(struct action_xlate_ctx *ctx)
5729{
5730 const struct flow *base = &ctx->base_flow;
1673e0e4 5731 union user_action_cookie *cookie;
6ff686f2
PS
5732
5733 if (!ctx->user_cookie_offset) {
5734 return;
5735 }
5736
5737 cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
36fc5f18 5738 sizeof(*cookie));
cb22974d 5739 ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
6ff686f2 5740
36fc5f18
BP
5741 compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
5742 ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
6ff686f2
PS
5743}
5744
6ff686f2 5745static void
81b1afb1
EJ
5746compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
5747 bool check_stp)
6ff686f2 5748{
d59906fb 5749 const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
52a90c29 5750 ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
b9ad7294 5751 ovs_be64 flow_tun_id = ctx->flow.tunnel.tun_id;
8b36f51e 5752 uint8_t flow_nw_tos = ctx->flow.nw_tos;
a4454ac6 5753 struct priority_to_dscp *pdscp;
0a740f48
EJ
5754 uint32_t out_port, odp_port;
5755
5756 /* If 'struct flow' gets additional metadata, we'll need to zero it out
5757 * before traversing a patch port. */
cff78c88 5758 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
d59906fb 5759
a4454ac6
EJ
5760 if (!ofport) {
5761 xlate_report(ctx, "Nonexistent output port");
5762 return;
5763 } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
5764 xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
5765 return;
5766 } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
5767 xlate_report(ctx, "STP not in forwarding state, skipping output");
5768 return;
5769 }
8b36f51e 5770
0a740f48
EJ
5771 if (netdev_vport_is_patch(ofport->up.netdev)) {
5772 struct ofport_dpif *peer = ofport_get_peer(ofport);
5773 struct flow old_flow = ctx->flow;
5774 const struct ofproto_dpif *peer_ofproto;
bb374ef6 5775 enum slow_path_reason special;
ffaef958 5776 struct ofport_dpif *in_port;
0a740f48
EJ
5777
5778 if (!peer) {
5779 xlate_report(ctx, "Nonexistent patch port peer");
5780 return;
5781 }
5782
5783 peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
5784 if (peer_ofproto->backer != ctx->ofproto->backer) {
5785 xlate_report(ctx, "Patch port peer on a different datapath");
5786 return;
5787 }
5788
5789 ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
5790 ctx->flow.in_port = peer->up.ofp_port;
5791 ctx->flow.metadata = htonll(0);
5792 memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
5793 memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
ffaef958
BP
5794
5795 in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
bb374ef6
EJ
5796 special = process_special(ctx->ofproto, &ctx->flow, in_port,
5797 ctx->packet);
5798 if (special) {
5799 ctx->slow |= special;
5800 } else if (!in_port || may_receive(in_port, ctx)) {
ffaef958
BP
5801 if (!in_port || stp_forward_in_state(in_port->stp_state)) {
5802 xlate_table_action(ctx, ctx->flow.in_port, 0, true);
5803 } else {
5804 /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
5805 * learning action look at the packet, then drop it. */
5806 struct flow old_base_flow = ctx->base_flow;
5807 size_t old_size = ctx->odp_actions->size;
5808 xlate_table_action(ctx, ctx->flow.in_port, 0, true);
5809 ctx->base_flow = old_base_flow;
5810 ctx->odp_actions->size = old_size;
5811 }
5812 }
5813
0a740f48
EJ
5814 ctx->flow = old_flow;
5815 ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
5816
5817 if (ctx->resubmit_stats) {
b9ad7294
EJ
5818 netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
5819 netdev_vport_inc_rx(peer->up.netdev, ctx->resubmit_stats);
0a740f48
EJ
5820 }
5821
5822 return;
5823 }
5824
a4454ac6
EJ
5825 pdscp = get_priority(ofport, ctx->flow.skb_priority);
5826 if (pdscp) {
5827 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
5828 ctx->flow.nw_tos |= pdscp->dscp;
d59906fb
EJ
5829 }
5830
b9ad7294
EJ
5831 if (ofport->tnl_port) {
5832 odp_port = tnl_port_send(ofport->tnl_port, &ctx->flow);
5833 if (odp_port == OVSP_NONE) {
5834 xlate_report(ctx, "Tunneling decided against output");
5835 return;
5836 }
5837
5838 if (ctx->resubmit_stats) {
5839 netdev_vport_inc_tx(ofport->up.netdev, ctx->resubmit_stats);
5840 }
5841 out_port = odp_port;
5842 commit_odp_tunnel_action(&ctx->flow, &ctx->base_flow,
5843 ctx->odp_actions);
5844 } else {
cf630ea3 5845 odp_port = ofport->odp_port;
b9ad7294
EJ
5846 out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
5847 ctx->flow.vlan_tci);
5848 if (out_port != odp_port) {
5849 ctx->flow.vlan_tci = htons(0);
5850 }
321fa429 5851 ctx->flow.skb_mark &= ~IPSEC_MARK;
52a90c29 5852 }
5bbda0aa 5853 commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
52a90c29
BP
5854 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
5855
6ff686f2
PS
5856 ctx->sflow_odp_port = odp_port;
5857 ctx->sflow_n_outputs++;
81b1afb1 5858 ctx->nf_output_iface = ofp_port;
b9ad7294 5859 ctx->flow.tunnel.tun_id = flow_tun_id;
52a90c29 5860 ctx->flow.vlan_tci = flow_vlan_tci;
8b36f51e 5861 ctx->flow.nw_tos = flow_nw_tos;
6ff686f2
PS
5862}
5863
abe529af 5864static void
5e48dc2b 5865compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
abe529af 5866{
81b1afb1 5867 compose_output_action__(ctx, ofp_port, true);
abe529af
BP
5868}
5869
55599423
JR
5870static void
5871tag_the_flow(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
5872{
5873 struct ofproto_dpif *ofproto = ctx->ofproto;
5874 uint8_t table_id = ctx->table_id;
5875
5876 if (table_id > 0 && table_id < N_TABLES) {
5877 struct table_dpif *table = &ofproto->tables[table_id];
5878 if (table->other_table) {
5879 ctx->tags |= (rule && rule->tag
5880 ? rule->tag
5881 : rule_calculate_tag(&ctx->flow,
5882 &table->other_table->mask,
5883 table->basis));
5884 }
5885 }
5886}
5887
5888/* Common rule processing in one place to avoid duplicating code. */
5889static struct rule_dpif *
5890ctx_rule_hooks(struct action_xlate_ctx *ctx, struct rule_dpif *rule,
5891 bool may_packet_in)
5892{
5893 if (ctx->resubmit_hook) {
5894 ctx->resubmit_hook(ctx, rule);
5895 }
5896 if (rule == NULL && may_packet_in) {
 5897 /* XXX
 5898 * Check the table configuration flags:
 5899 * OFPTC_TABLE_MISS_CONTROLLER (the default),
 5900 * OFPTC_TABLE_MISS_CONTINUE,
 5901 * OFPTC_TABLE_MISS_DROP.
 5902 * With OpenFlow 1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
 5903 */
5904 rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->flow);
5905 }
5906 if (rule && ctx->resubmit_stats) {
5907 rule_credit_stats(rule, ctx->resubmit_stats);
5908 }
5909 return rule;
5910}
5911
abe529af 5912static void
29901626 5913xlate_table_action(struct action_xlate_ctx *ctx,
1688c479 5914 uint16_t in_port, uint8_t table_id, bool may_packet_in)
abe529af
BP
5915{
5916 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
5917 struct rule_dpif *rule;
55599423
JR
5918 uint16_t old_in_port = ctx->flow.in_port;
5919 uint8_t old_table_id = ctx->table_id;
29901626 5920
29901626 5921 ctx->table_id = table_id;
abe529af 5922
54a9cbc9 5923 /* Look up a flow with 'in_port' as the input port. */
abe529af 5924 ctx->flow.in_port = in_port;
55599423
JR
5925 rule = rule_dpif_lookup__(ctx->ofproto, &ctx->flow, table_id);
5926
5927 tag_the_flow(ctx, rule);
54a9cbc9
BP
5928
5929 /* Restore the original input port. Otherwise OFPP_NORMAL and
5930 * OFPP_IN_PORT will have surprising behavior. */
abe529af
BP
5931 ctx->flow.in_port = old_in_port;
5932
55599423 5933 rule = ctx_rule_hooks(ctx, rule, may_packet_in);
1688c479 5934
abe529af 5935 if (rule) {
18b2a258 5936 struct rule_dpif *old_rule = ctx->rule;
54834960 5937
abe529af 5938 ctx->recurse++;
18b2a258 5939 ctx->rule = rule;
f25d0cf3 5940 do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
18b2a258 5941 ctx->rule = old_rule;
abe529af
BP
5942 ctx->recurse--;
5943 }
29901626
BP
5944
5945 ctx->table_id = old_table_id;
abe529af
BP
5946 } else {
5947 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
5948
29901626 5949 VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
abe529af 5950 MAX_RESUBMIT_RECURSION);
6a6455e5 5951 ctx->max_resubmit_trigger = true;
abe529af
BP
5952 }
5953}
5954
29901626 5955static void
f25d0cf3
BP
5956xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
5957 const struct ofpact_resubmit *resubmit)
29901626
BP
5958{
5959 uint16_t in_port;
5960 uint8_t table_id;
5961
f25d0cf3
BP
5962 in_port = resubmit->in_port;
5963 if (in_port == OFPP_IN_PORT) {
5964 in_port = ctx->flow.in_port;
5965 }
5966
5967 table_id = resubmit->table_id;
5968 if (table_id == 255) {
5969 table_id = ctx->table_id;
5970 }
29901626 5971
1688c479 5972 xlate_table_action(ctx, in_port, table_id, false);
29901626
BP
5973}
5974
abe529af 5975static void
d59906fb 5976flood_packets(struct action_xlate_ctx *ctx, bool all)
abe529af
BP
5977{
5978 struct ofport_dpif *ofport;
5979
b3e9b2ed 5980 HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
abe529af 5981 uint16_t ofp_port = ofport->up.ofp_port;
d59906fb
EJ
5982
5983 if (ofp_port == ctx->flow.in_port) {
5984 continue;
5985 }
5986
5e48dc2b 5987 if (all) {
81b1afb1 5988 compose_output_action__(ctx, ofp_port, false);
9e1fd49b 5989 } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
5e48dc2b 5990 compose_output_action(ctx, ofp_port);
abe529af
BP
5991 }
5992 }
b3e9b2ed
EJ
5993
5994 ctx->nf_output_iface = NF_OUT_FLOOD;
abe529af
BP
5995}
5996
6ff686f2 5997static void
f0fd1a17 5998execute_controller_action(struct action_xlate_ctx *ctx, int len,
a7349929
BP
5999 enum ofp_packet_in_reason reason,
6000 uint16_t controller_id)
6ff686f2 6001{
999fba59
EJ
6002 struct ofputil_packet_in pin;
6003 struct ofpbuf *packet;
6ff686f2 6004
6a7e895f 6005 ctx->slow |= SLOW_CONTROLLER;
999fba59
EJ
6006 if (!ctx->packet) {
6007 return;
6008 }
6009
6010 packet = ofpbuf_clone(ctx->packet);
6011
6012 if (packet->l2 && packet->l3) {
6013 struct eth_header *eh;
b02475c5 6014 uint16_t mpls_depth;
999fba59
EJ
6015
6016 eth_pop_vlan(packet);
6017 eh = packet->l2;
0104aba8 6018
999fba59
EJ
6019 memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
6020 memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
6021
6022 if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
6023 eth_push_vlan(packet, ctx->flow.vlan_tci);
6024 }
6025
b02475c5
SH
6026 mpls_depth = eth_mpls_depth(packet);
6027
6028 if (mpls_depth < ctx->flow.mpls_depth) {
6029 push_mpls(packet, ctx->flow.dl_type, ctx->flow.mpls_lse);
6030 } else if (mpls_depth > ctx->flow.mpls_depth) {
6031 pop_mpls(packet, ctx->flow.dl_type);
6032 } else if (mpls_depth) {
6033 set_mpls_lse(packet, ctx->flow.mpls_lse);
6034 }
6035
999fba59
EJ
6036 if (packet->l4) {
6037 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
6038 packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
6039 ctx->flow.nw_tos, ctx->flow.nw_ttl);
6040 }
6041
6042 if (packet->l7) {
6043 if (ctx->flow.nw_proto == IPPROTO_TCP) {
6044 packet_set_tcp_port(packet, ctx->flow.tp_src,
6045 ctx->flow.tp_dst);
6046 } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
6047 packet_set_udp_port(packet, ctx->flow.tp_src,
6048 ctx->flow.tp_dst);
6049 }
6050 }
6051 }
6052 }
6053
6054 pin.packet = packet->data;
6055 pin.packet_len = packet->size;
f0fd1a17 6056 pin.reason = reason;
a7349929 6057 pin.controller_id = controller_id;
54834960 6058 pin.table_id = ctx->table_id;
18b2a258 6059 pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
54834960 6060
999fba59 6061 pin.send_len = len;
999fba59
EJ
6062 flow_get_metadata(&ctx->flow, &pin.fmd);
6063
d8653c38 6064 connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
999fba59 6065 ofpbuf_delete(packet);
6ff686f2
PS
6066}
6067
b02475c5
SH
6068static void
6069execute_mpls_push_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
6070{
6071 ovs_assert(eth_type_mpls(eth_type));
6072
6073 if (ctx->base_flow.mpls_depth) {
6074 ctx->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
6075 ctx->flow.mpls_depth++;
6076 } else {
6077 ovs_be32 label;
6078 uint8_t tc, ttl;
6079
6080 if (ctx->flow.dl_type == htons(ETH_TYPE_IPV6)) {
6081 label = htonl(0x2); /* IPV6 Explicit Null. */
6082 } else {
6083 label = htonl(0x0); /* IPV4 Explicit Null. */
6084 }
6085 tc = (ctx->flow.nw_tos & IP_DSCP_MASK) >> 2;
6086 ttl = ctx->flow.nw_ttl ? ctx->flow.nw_ttl : 0x40;
6087 ctx->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
b02475c5
SH
6088 ctx->flow.mpls_depth = 1;
6089 }
6090 ctx->flow.dl_type = eth_type;
6091}
6092
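/* For instance, a first push onto an IPv4 packet with a zero DSCP and no IP
 * TTL would compose its label stack entry like this (an illustrative sketch,
 * not part of the build): */
#if 0
ovs_be32 lse = set_mpls_lse_values(0x40,        /* Default TTL. */
                                   0,           /* TC from a zero DSCP. */
                                   1,           /* Bottom of stack. */
                                   htonl(0x0)); /* IPv4 Explicit Null. */
#endif
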
6093static void
6094execute_mpls_pop_action(struct action_xlate_ctx *ctx, ovs_be16 eth_type)
6095{
6096 ovs_assert(eth_type_mpls(ctx->flow.dl_type));
6097 ovs_assert(!eth_type_mpls(eth_type));
6098
6099 if (ctx->flow.mpls_depth) {
6100 ctx->flow.mpls_depth--;
6101 ctx->flow.mpls_lse = htonl(0);
6102 if (!ctx->flow.mpls_depth) {
6103 ctx->flow.dl_type = eth_type;
b02475c5
SH
6104 }
6105 }
6106}
6107
f0fd1a17 6108static bool
c2d967a5 6109compose_dec_ttl(struct action_xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
f0fd1a17
PS
6110{
6111 if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
6112 ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
6113 return false;
6114 }
6115
6116 if (ctx->flow.nw_ttl > 1) {
6117 ctx->flow.nw_ttl--;
6118 return false;
6119 } else {
c2d967a5
MM
6120 size_t i;
6121
6122 for (i = 0; i < ids->n_controllers; i++) {
6123 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
6124 ids->cnt_ids[i]);
6125 }
f0fd1a17
PS
6126
6127 /* Stop processing for current table. */
6128 return true;
6129 }
6130}
6131
0f3f3c3d
SH
6132static bool
6133execute_set_mpls_ttl_action(struct action_xlate_ctx *ctx, uint8_t ttl)
6134{
6135 if (!eth_type_mpls(ctx->flow.dl_type)) {
6136 return true;
6137 }
6138
6139 set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
6140 return false;
6141}
6142
b676167a
SH
6143static bool
6144execute_dec_mpls_ttl_action(struct action_xlate_ctx *ctx)
6145{
6146 uint8_t ttl = mpls_lse_to_ttl(ctx->flow.mpls_lse);
6147
6148 if (!eth_type_mpls(ctx->flow.dl_type)) {
6149 return false;
6150 }
6151
be80bc65 6152 if (ttl > 1) {
b676167a
SH
6153 ttl--;
6154 set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
6155 return false;
6156 } else {
6157 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
6158
6159 /* Stop processing for current table. */
6160 return true;
6161 }
6162}
6163
abe529af 6164static void
f25d0cf3 6165xlate_output_action(struct action_xlate_ctx *ctx,
1688c479 6166 uint16_t port, uint16_t max_len, bool may_packet_in)
abe529af
BP
6167{
6168 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
6169
6170 ctx->nf_output_iface = NF_OUT_DROP;
6171
6172 switch (port) {
6173 case OFPP_IN_PORT:
81b1afb1 6174 compose_output_action(ctx, ctx->flow.in_port);
abe529af
BP
6175 break;
6176 case OFPP_TABLE:
1688c479 6177 xlate_table_action(ctx, ctx->flow.in_port, 0, may_packet_in);
abe529af
BP
6178 break;
6179 case OFPP_NORMAL:
6180 xlate_normal(ctx);
6181 break;
6182 case OFPP_FLOOD:
d59906fb 6183 flood_packets(ctx, false);
abe529af
BP
6184 break;
6185 case OFPP_ALL:
d59906fb 6186 flood_packets(ctx, true);
abe529af
BP
6187 break;
6188 case OFPP_CONTROLLER:
a7349929 6189 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
abe529af 6190 break;
e81d2933
EJ
6191 case OFPP_NONE:
6192 break;
a0fbe94a 6193 case OFPP_LOCAL:
abe529af
BP
6194 default:
6195 if (port != ctx->flow.in_port) {
81b1afb1 6196 compose_output_action(ctx, port);
3dd3eace
BP
6197 } else {
6198 xlate_report(ctx, "skipping output to input port");
abe529af
BP
6199 }
6200 break;
6201 }
6202
6203 if (prev_nf_output_iface == NF_OUT_FLOOD) {
6204 ctx->nf_output_iface = NF_OUT_FLOOD;
6205 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
6206 ctx->nf_output_iface = prev_nf_output_iface;
6207 } else if (prev_nf_output_iface != NF_OUT_DROP &&
6208 ctx->nf_output_iface != NF_OUT_FLOOD) {
6209 ctx->nf_output_iface = NF_OUT_MULTI;
6210 }
6211}
6212
f694937d
EJ
6213static void
6214xlate_output_reg_action(struct action_xlate_ctx *ctx,
f25d0cf3 6215 const struct ofpact_output_reg *or)
f694937d 6216{
f25d0cf3
BP
6217 uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
6218 if (port <= UINT16_MAX) {
1688c479 6219 xlate_output_action(ctx, port, or->max_len, false);
f694937d
EJ
6220 }
6221}
6222
abe529af
BP
6223static void
6224xlate_enqueue_action(struct action_xlate_ctx *ctx,
f25d0cf3 6225 const struct ofpact_enqueue *enqueue)
abe529af 6226{
f25d0cf3
BP
6227 uint16_t ofp_port = enqueue->port;
6228 uint32_t queue_id = enqueue->queue;
abff858b 6229 uint32_t flow_priority, priority;
abe529af
BP
6230 int error;
6231
f25d0cf3 6232 /* Translate queue to priority. */
acf60855
JP
6233 error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
6234 queue_id, &priority);
abe529af
BP
6235 if (error) {
6236 /* Fall back to ordinary output action. */
1688c479 6237 xlate_output_action(ctx, enqueue->port, 0, false);
abe529af
BP
6238 return;
6239 }
6240
f25d0cf3 6241 /* Check output port. */
abe529af
BP
6242 if (ofp_port == OFPP_IN_PORT) {
6243 ofp_port = ctx->flow.in_port;
8ba855c1
BP
6244 } else if (ofp_port == ctx->flow.in_port) {
6245 return;
abe529af 6246 }
abe529af 6247
df2c07f4 6248 /* Add datapath actions. */
deedf7e7
BP
6249 flow_priority = ctx->flow.skb_priority;
6250 ctx->flow.skb_priority = priority;
81b1afb1 6251 compose_output_action(ctx, ofp_port);
deedf7e7 6252 ctx->flow.skb_priority = flow_priority;
abe529af
BP
6253
6254 /* Update NetFlow output port. */
6255 if (ctx->nf_output_iface == NF_OUT_DROP) {
4b23aebf 6256 ctx->nf_output_iface = ofp_port;
abe529af
BP
6257 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
6258 ctx->nf_output_iface = NF_OUT_MULTI;
6259 }
6260}
6261
6262static void
f25d0cf3 6263xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
abe529af 6264{
f25d0cf3 6265 uint32_t skb_priority;
abe529af 6266
acf60855
JP
6267 if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
6268 queue_id, &skb_priority)) {
f25d0cf3
BP
6269 ctx->flow.skb_priority = skb_priority;
6270 } else {
6271 /* Couldn't translate queue to a priority. Nothing to do. A warning
abe529af 6272 * has already been logged. */
abe529af 6273 }
abe529af
BP
6274}
6275
6276struct xlate_reg_state {
6277 ovs_be16 vlan_tci;
6278 ovs_be64 tun_id;
6279};
6280
daff3353
EJ
6281static bool
6282slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
6283{
6284 struct ofproto_dpif *ofproto = ofproto_;
6285 struct ofport_dpif *port;
6286
6287 switch (ofp_port) {
6288 case OFPP_IN_PORT:
6289 case OFPP_TABLE:
6290 case OFPP_NORMAL:
6291 case OFPP_FLOOD:
6292 case OFPP_ALL:
439e4d8c 6293 case OFPP_NONE:
daff3353
EJ
6294 return true;
6295 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
6296 return false;
6297 default:
6298 port = get_ofp_port(ofproto, ofp_port);
6299 return port ? port->may_enable : false;
6300 }
6301}
6302
f25d0cf3
BP
6303static void
6304xlate_bundle_action(struct action_xlate_ctx *ctx,
6305 const struct ofpact_bundle *bundle)
6306{
6307 uint16_t port;
6308
6309 port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
6310 if (bundle->dst.field) {
6311 nxm_reg_load(&bundle->dst, port, &ctx->flow);
6312 } else {
1688c479 6313 xlate_output_action(ctx, port, 0, false);
f25d0cf3
BP
6314 }
6315}
6316
75a75043
BP
6317static void
6318xlate_learn_action(struct action_xlate_ctx *ctx,
f25d0cf3 6319 const struct ofpact_learn *learn)
75a75043
BP
6320{
6321 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
6322 struct ofputil_flow_mod fm;
f25d0cf3
BP
6323 uint64_t ofpacts_stub[1024 / 8];
6324 struct ofpbuf ofpacts;
75a75043
BP
6325 int error;
6326
f25d0cf3
BP
6327 ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
6328 learn_execute(learn, &ctx->flow, &fm, &ofpacts);
75a75043
BP
6329
6330 error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
6331 if (error && !VLOG_DROP_WARN(&rl)) {
90bf1e07
BP
6332 VLOG_WARN("learning action failed to modify flow table (%s)",
6333 ofperr_get_name(error));
75a75043
BP
6334 }
6335
f25d0cf3 6336 ofpbuf_uninit(&ofpacts);
75a75043
BP
6337}
6338
0e553d9c
BP
6339/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
6340 * means "infinite". */
6341static void
6342reduce_timeout(uint16_t max, uint16_t *timeout)
6343{
6344 if (max && (!*timeout || *timeout > max)) {
6345 *timeout = max;
6346 }
6347}
6348
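/* Worked examples for the helper above (illustrative, not part of the
 * build): */
#if 0
uint16_t timeout;

timeout = 0;                        /* "Infinite". */
reduce_timeout(10, &timeout);       /* timeout -> 10. */

timeout = 300;
reduce_timeout(10, &timeout);       /* timeout -> 10. */

timeout = 5;
reduce_timeout(10, &timeout);       /* Unchanged: already under the cap. */

timeout = 5;
reduce_timeout(0, &timeout);        /* Unchanged: a 'max' of 0 is no cap. */
#endif
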
6349static void
6350xlate_fin_timeout(struct action_xlate_ctx *ctx,
f25d0cf3 6351 const struct ofpact_fin_timeout *oft)
0e553d9c
BP
6352{
6353 if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
6354 struct rule_dpif *rule = ctx->rule;
6355
f25d0cf3
BP
6356 reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
6357 reduce_timeout(oft->fin_hard_timeout, &rule->up.hard_timeout);
0e553d9c
BP
6358 }
6359}
6360
21f7563c
JP
6361static bool
6362may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
6363{
9e1fd49b
BP
6364 if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
6365 ? OFPUTIL_PC_NO_RECV_STP
6366 : OFPUTIL_PC_NO_RECV)) {
21f7563c
JP
6367 return false;
6368 }
6369
6370 /* Only drop packets here if both forwarding and learning are
6371 * disabled. If just learning is enabled, we need to have
6372 * OFPP_NORMAL and the learning action have a look at the packet
6373 * before we can drop it. */
6374 if (!stp_forward_in_state(port->stp_state)
6375 && !stp_learn_in_state(port->stp_state)) {
6376 return false;
6377 }
6378
6379 return true;
6380}
6381
4863c249
JP
6382static bool
6383tunnel_ecn_ok(struct action_xlate_ctx *ctx)
6384{
6385 if (is_ip_any(&ctx->base_flow)
29a5df0a
JP
6386 && (ctx->base_flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
6387 if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
6388 VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
6389 " but is not ECN capable");
6390 return false;
6391 } else {
6392 /* Set the ECN CE value in the tunneled packet. */
6393 ctx->flow.nw_tos |= IP_ECN_CE;
6394 }
4863c249
JP
6395 }
6396
6397 return true;
6398}
6399
abe529af 6400static void
f25d0cf3 6401do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
abe529af
BP
6402 struct action_xlate_ctx *ctx)
6403{
254750ce 6404 bool was_evictable = true;
f25d0cf3 6405 const struct ofpact *a;
abe529af 6406
254750ce
BP
6407 if (ctx->rule) {
6408 /* Don't let the rule we're working on get evicted underneath us. */
6409 was_evictable = ctx->rule->up.evictable;
6410 ctx->rule->up.evictable = false;
6411 }
55599423
JR
6412
6413 do_xlate_actions_again:
f25d0cf3
BP
6414 OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
6415 struct ofpact_controller *controller;
4cceacb9 6416 const struct ofpact_metadata *metadata;
38f2e360 6417
848e8809
EJ
6418 if (ctx->exit) {
6419 break;
6420 }
6421
f25d0cf3
BP
6422 switch (a->type) {
6423 case OFPACT_OUTPUT:
6424 xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
1688c479 6425 ofpact_get_OUTPUT(a)->max_len, true);
f25d0cf3
BP
6426 break;
6427
6428 case OFPACT_CONTROLLER:
6429 controller = ofpact_get_CONTROLLER(a);
6430 execute_controller_action(ctx, controller->max_len,
6431 controller->reason,
6432 controller->controller_id);
6433 break;
690a61c5 6434
f25d0cf3
BP
6435 case OFPACT_ENQUEUE:
6436 xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
abe529af
BP
6437 break;
6438
f25d0cf3 6439 case OFPACT_SET_VLAN_VID:
abe529af 6440 ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
f25d0cf3
BP
6441 ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
6442 | htons(VLAN_CFI));
abe529af
BP
6443 break;
6444
f25d0cf3 6445 case OFPACT_SET_VLAN_PCP:
abe529af 6446 ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
f25d0cf3
BP
6447 ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
6448 << VLAN_PCP_SHIFT)
6449 | VLAN_CFI);
abe529af
BP
6450 break;
6451
f25d0cf3 6452 case OFPACT_STRIP_VLAN:
abe529af 6453 ctx->flow.vlan_tci = htons(0);
abe529af
BP
6454 break;
6455
3e34fbdd 6456 case OFPACT_PUSH_VLAN:
5dca28b5 6457 /* XXX 802.1ad (QinQ) */
3e34fbdd
IY
6458 ctx->flow.vlan_tci = htons(VLAN_CFI);
6459 break;
6460
f25d0cf3
BP
6461 case OFPACT_SET_ETH_SRC:
6462 memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
6463 ETH_ADDR_LEN);
abe529af
BP
6464 break;
6465
f25d0cf3
BP
6466 case OFPACT_SET_ETH_DST:
6467 memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
6468 ETH_ADDR_LEN);
abe529af
BP
6469 break;
6470
f25d0cf3 6471 case OFPACT_SET_IPV4_SRC:
1b035ef2
SH
6472 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
6473 ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
6474 }
abe529af
BP
6475 break;
6476
f25d0cf3 6477 case OFPACT_SET_IPV4_DST:
1b035ef2
SH
6478 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
6479 ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
6480 }
abe529af
BP
6481 break;
6482
f25d0cf3 6483 case OFPACT_SET_IPV4_DSCP:
c4f2731d
PS
6484 /* OpenFlow 1.0 only supports IPv4. */
6485 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
6486 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
f25d0cf3 6487 ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
c4f2731d 6488 }
abe529af
BP
6489 break;
6490
f25d0cf3 6491 case OFPACT_SET_L4_SRC_PORT:
1b035ef2
SH
6492 if (is_ip_any(&ctx->flow)) {
6493 ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
6494 }
abe529af
BP
6495 break;
6496
f25d0cf3 6497 case OFPACT_SET_L4_DST_PORT:
1b035ef2
SH
6498 if (is_ip_any(&ctx->flow)) {
6499 ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
6500 }
abe529af
BP
6501 break;
6502
f25d0cf3
BP
6503 case OFPACT_RESUBMIT:
6504 xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
38f2e360
BP
6505 break;
6506
f25d0cf3 6507 case OFPACT_SET_TUNNEL:
296e07ac 6508 ctx->flow.tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
29901626
BP
6509 break;
6510
f25d0cf3
BP
6511 case OFPACT_SET_QUEUE:
6512 xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
abe529af
BP
6513 break;
6514
f25d0cf3 6515 case OFPACT_POP_QUEUE:
deedf7e7 6516 ctx->flow.skb_priority = ctx->orig_skb_priority;
38f2e360
BP
6517 break;
6518
f25d0cf3
BP
6519 case OFPACT_REG_MOVE:
6520 nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
38f2e360
BP
6521 break;
6522
f25d0cf3
BP
6523 case OFPACT_REG_LOAD:
6524 nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
38f2e360
BP
6525 break;
6526
bd85dac1
AZ
6527 case OFPACT_STACK_PUSH:
6528 nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->flow,
6529 &ctx->stack);
6530 break;
6531
6532 case OFPACT_STACK_POP:
6533 nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->flow,
6534 &ctx->stack);
6535 break;
6536
b02475c5
SH
6537 case OFPACT_PUSH_MPLS:
6538 execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
6539 break;
6540
6541 case OFPACT_POP_MPLS:
6542 execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
6543 break;
6544
0f3f3c3d
SH
6545 case OFPACT_SET_MPLS_TTL:
6546 if (execute_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl)) {
6547 goto out;
6548 }
6549 break;
6550
b676167a
SH
6551 case OFPACT_DEC_MPLS_TTL:
6552 if (execute_dec_mpls_ttl_action(ctx)) {
6553 goto out;
6554 }
6555 break;
6556
f25d0cf3 6557 case OFPACT_DEC_TTL:
c2d967a5 6558 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
f25d0cf3
BP
6559 goto out;
6560 }
38f2e360
BP
6561 break;
6562
f25d0cf3
BP
6563 case OFPACT_NOTE:
6564 /* Nothing to do. */
abe529af
BP
6565 break;
6566
f25d0cf3
BP
6567 case OFPACT_MULTIPATH:
6568 multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
abe529af 6569 break;
daff3353 6570
f25d0cf3 6571 case OFPACT_BUNDLE:
a368bb53 6572 ctx->ofproto->has_bundle_action = true;
f25d0cf3 6573 xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
a368bb53 6574 break;
f694937d 6575
f25d0cf3
BP
6576 case OFPACT_OUTPUT_REG:
6577 xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
f694937d 6578 break;
75a75043 6579
f25d0cf3 6580 case OFPACT_LEARN:
75a75043 6581 ctx->has_learn = true;
3de9590b 6582 if (ctx->may_learn) {
f25d0cf3 6583 xlate_learn_action(ctx, ofpact_get_LEARN(a));
75a75043
BP
6584 }
6585 break;
848e8809 6586
f25d0cf3 6587 case OFPACT_EXIT:
848e8809
EJ
6588 ctx->exit = true;
6589 break;
0e553d9c 6590
f25d0cf3 6591 case OFPACT_FIN_TIMEOUT:
0e553d9c 6592 ctx->has_fin_timeout = true;
f25d0cf3 6593 xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
a7349929 6594 break;
8dd54666 6595
b19e8793 6596 case OFPACT_CLEAR_ACTIONS:
5dca28b5 6597 /* XXX
b19e8793
IY
6598 * Nothing to do because write-actions is not yet supported.
6599 * When write-actions is supported, clear-actions must also
6600 * be supported at the same time.
6601 */
6602 break;
6603
4cceacb9
JS
6604 case OFPACT_WRITE_METADATA:
6605 metadata = ofpact_get_WRITE_METADATA(a);
6606 ctx->flow.metadata &= ~metadata->mask;
6607 ctx->flow.metadata |= metadata->metadata & metadata->mask;
6608 break;
6609
8dd54666 6610 case OFPACT_GOTO_TABLE: {
55599423 6611 /* It is assumed that goto-table is the last action. */
8dd54666 6612 struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
55599423
JR
6613 struct rule_dpif *rule;
6614
cb22974d 6615 ovs_assert(ctx->table_id < ogt->table_id);
55599423
JR
6616
6617 ctx->table_id = ogt->table_id;
6618
6619 /* Look up a flow from the new table. */
6620 rule = rule_dpif_lookup__(ctx->ofproto, &ctx->flow, ctx->table_id);
6621
6622 tag_the_flow(ctx, rule);
6623
6624 rule = ctx_rule_hooks(ctx, rule, true);
6625
6626 if (rule) {
6627 if (ctx->rule) {
6628 ctx->rule->up.evictable = was_evictable;
6629 }
6630 ctx->rule = rule;
6631 was_evictable = rule->up.evictable;
6632 rule->up.evictable = false;
6633
6634 /* Tail recursion removal. */
6635 ofpacts = rule->up.ofpacts;
6636 ofpacts_len = rule->up.ofpacts_len;
6637 goto do_xlate_actions_again;
6638 }
8dd54666
IY
6639 break;
6640 }
abe529af
BP
6641 }
6642 }
21f7563c 6643
f0fd1a17 6644out:
254750ce
BP
6645 if (ctx->rule) {
6646 ctx->rule->up.evictable = was_evictable;
6647 }
abe529af
BP
6648}
6649
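/* Illustrative sketch (not part of the original file): the OFPACT_GOTO_TABLE
 * case above avoids growing the C stack once per table by re-pointing the
 * loop variables at the next table's action list and jumping back to
 * do_xlate_actions_again, a form of tail recursion removal.  In miniature,
 * with 'lookup_next' standing in as a hypothetical helper:
 *
 *     again:
 *         OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
 *             if (a->type == OFPACT_GOTO_TABLE) {
 *                 const struct rule_dpif *next = lookup_next(ctx, a);
 *                 ofpacts = next->up.ofpacts;
 *                 ofpacts_len = next->up.ofpacts_len;
 *                 goto again;            (reuses this stack frame)
 *             }
 *             ...                        (handle other action types)
 *         }
 */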
6650static void
6651action_xlate_ctx_init(struct action_xlate_ctx *ctx,
6652 struct ofproto_dpif *ofproto, const struct flow *flow,
14f94f9a
JP
6653 const struct initial_vals *initial_vals,
6654 struct rule_dpif *rule,
0e553d9c 6655 uint8_t tcp_flags, const struct ofpbuf *packet)
abe529af 6656{
ef506a7c
JG
6657 ovs_be64 initial_tun_id = flow->tunnel.tun_id;
6658
6659 /* Flow initialization rules:
6660 * - 'base_flow' must match the kernel's view of the packet at the
6661 * time that action processing starts. 'flow' represents any
6662 * transformations we wish to make through actions.
6663 * - By default 'base_flow' and 'flow' are the same since the input
6664 * packet matches the output before any actions are applied.
6665 * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
6666 * of the received packet as seen by the kernel. If we later output
6667 * to another device without any modifications this will cause us to
6668 * insert a new tag since the original one was stripped off by the
6669 * VLAN device.
6670 * - Tunnel 'flow' is largely cleared when transitioning between
6671 * the input and output stages since it does not make sense to output
6672 * a packet with the exact headers that it was received with (i.e.
6673 * the destination IP is us). The one exception is the tun_id, which
6674 * is preserved to allow use in later resubmit lookups and loads into
6675 * registers.
6676 * - Tunnel 'base_flow' is completely cleared since that is what the
6677 * kernel does. If we wish to maintain the original values an action
6678 * needs to be generated. */
6679
abe529af
BP
6680 ctx->ofproto = ofproto;
6681 ctx->flow = *flow;
47d4a9db 6682 memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
e84173dc 6683 ctx->base_flow = ctx->flow;
14f94f9a 6684 ctx->base_flow.vlan_tci = initial_vals->vlan_tci;
c3f6c502 6685 ctx->base_flow.tunnel.ip_tos = initial_vals->tunnel_ip_tos;
ef506a7c 6686 ctx->flow.tunnel.tun_id = initial_tun_id;
18b2a258 6687 ctx->rule = rule;
abe529af 6688 ctx->packet = packet;
3de9590b 6689 ctx->may_learn = packet != NULL;
0e553d9c 6690 ctx->tcp_flags = tcp_flags;
abe529af 6691 ctx->resubmit_hook = NULL;
479df176 6692 ctx->report_hook = NULL;
112bc5f4 6693 ctx->resubmit_stats = NULL;
abe529af
BP
6694}
6695
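/* Illustrative calling pattern, sketched from packet_out() further below
 * (not additional code in this file); 'ofproto', 'flow', 'packet', and the
 * ofpact list are assumed to be supplied by the caller.  The caller seeds
 * 'initial_vals' from the packet as originally received, initializes the
 * context, and translates into a stack-backed ofpbuf of datapath actions:
 *
 *     struct initial_vals initial_vals;
 *     struct action_xlate_ctx ctx;
 *     uint64_t stub[1024 / 8];
 *     struct ofpbuf odp_actions;
 *
 *     initial_vals.vlan_tci = flow->vlan_tci;
 *     initial_vals.tunnel_ip_tos = 0;
 *     action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals,
 *                           NULL, 0, packet);
 *     ofpbuf_use_stub(&odp_actions, stub, sizeof stub);
 *     xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
 *     ... use odp_actions.data and odp_actions.size ...
 *     ofpbuf_uninit(&odp_actions);
 */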
f25d0cf3
BP
6696/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
6697 * into datapath actions in 'odp_actions', using 'ctx'. */
050ac423 6698static void
abe529af 6699xlate_actions(struct action_xlate_ctx *ctx,
f25d0cf3 6700 const struct ofpact *ofpacts, size_t ofpacts_len,
050ac423 6701 struct ofpbuf *odp_actions)
abe529af 6702{
43d50bc8
BP
6703 /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
6704 * that in the future we always keep a copy of the original flow for
6705 * tracing purposes. */
6706 static bool hit_resubmit_limit;
6707
6a7e895f 6708 enum slow_path_reason special;
ffaef958 6709 struct ofport_dpif *in_port;
9ba85077 6710 struct flow orig_flow;
6a7e895f 6711
abe529af
BP
6712 COVERAGE_INC(ofproto_dpif_xlate);
6713
050ac423
BP
6714 ofpbuf_clear(odp_actions);
6715 ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);
6716
6717 ctx->odp_actions = odp_actions;
97e42c92 6718 ctx->tags = 0;
6a7e895f 6719 ctx->slow = 0;
97e42c92
BP
6720 ctx->has_learn = false;
6721 ctx->has_normal = false;
0e553d9c 6722 ctx->has_fin_timeout = false;
97e42c92 6723 ctx->nf_output_iface = NF_OUT_DROP;
9d24de3b 6724 ctx->mirrors = 0;
97e42c92 6725 ctx->recurse = 0;
6a6455e5 6726 ctx->max_resubmit_trigger = false;
deedf7e7 6727 ctx->orig_skb_priority = ctx->flow.skb_priority;
97e42c92 6728 ctx->table_id = 0;
848e8809 6729 ctx->exit = false;
7257b535 6730
bd85dac1
AZ
6731 ofpbuf_use_stub(&ctx->stack, ctx->init_stack, sizeof ctx->init_stack);
6732
43d50bc8 6733 if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
ccb7c863 6734 /* Do this conditionally because the copy is expensive enough that it
9ba85077
BP
6735 * shows up in profiles. */
6736 orig_flow = ctx->flow;
ccb7c863
BP
6737 }
6738
eadef313 6739 if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
7257b535
BP
6740 switch (ctx->ofproto->up.frag_handling) {
6741 case OFPC_FRAG_NORMAL:
6742 /* We must pretend that transport ports are unavailable. */
97e42c92
BP
6743 ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
6744 ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
7257b535
BP
6745 break;
6746
6747 case OFPC_FRAG_DROP:
050ac423 6748 return;
7257b535
BP
6749
6750 case OFPC_FRAG_REASM:
6751 NOT_REACHED();
6752
6753 case OFPC_FRAG_NX_MATCH:
6754 /* Nothing to do. */
6755 break;
f0fd1a17
PS
6756
6757 case OFPC_INVALID_TTL_TO_CONTROLLER:
6758 NOT_REACHED();
7257b535
BP
6759 }
6760 }
6761
ffaef958
BP
6762 in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
6763 special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
6a7e895f
BP
6764 if (special) {
6765 ctx->slow |= special;
abe529af 6766 } else {
6a6455e5 6767 static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
14f94f9a 6768 struct initial_vals initial_vals;
ee382d89 6769 uint32_t local_odp_port;
6a6455e5 6770
14f94f9a 6771 initial_vals.vlan_tci = ctx->base_flow.vlan_tci;
c3f6c502 6772 initial_vals.tunnel_ip_tos = ctx->base_flow.tunnel.ip_tos;
14f94f9a 6773
6ff686f2 6774 add_sflow_action(ctx);
ffaef958 6775
4863c249 6776 if (tunnel_ecn_ok(ctx) && (!in_port || may_receive(in_port, ctx))) {
ffaef958
BP
6777 do_xlate_actions(ofpacts, ofpacts_len, ctx);
6778
6779 /* We've let OFPP_NORMAL and the learning action look at the
6780 * packet, so drop it now if forwarding is disabled. */
6781 if (in_port && !stp_forward_in_state(in_port->stp_state)) {
6782 ofpbuf_clear(ctx->odp_actions);
6783 add_sflow_action(ctx);
6784 }
6785 }
abe529af 6786
43d50bc8
BP
6787 if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
6788 if (!hit_resubmit_limit) {
6789 /* We didn't record the original flow. Make sure we do from
6790 * now on. */
6791 hit_resubmit_limit = true;
6792 } else if (!VLOG_DROP_ERR(&trace_rl)) {
6793 struct ds ds = DS_EMPTY_INITIALIZER;
6794
9ba85077 6795 ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
14f94f9a 6796 &initial_vals, &ds);
43d50bc8
BP
6797 VLOG_ERR("Trace triggered by excessive resubmit "
6798 "recursion:\n%s", ds_cstr(&ds));
6799 ds_destroy(&ds);
6800 }
6a6455e5
EJ
6801 }
6802
ee382d89 6803 local_odp_port = ofp_port_to_odp_port(ctx->ofproto, OFPP_LOCAL);
b6848f13 6804 if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
ee382d89 6805 local_odp_port,
b6848f13
BP
6806 ctx->odp_actions->data,
6807 ctx->odp_actions->size)) {
6a7e895f 6808 ctx->slow |= SLOW_IN_BAND;
b6848f13
BP
6809 if (ctx->packet
6810 && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
6811 ctx->packet)) {
5e48dc2b 6812 compose_output_action(ctx, OFPP_LOCAL);
b6848f13
BP
6813 }
6814 }
ccb7c863 6815 if (ctx->ofproto->has_mirrors) {
9ba85077 6816 add_mirror_actions(ctx, &orig_flow);
ccb7c863 6817 }
a7c4eaf6 6818 fix_sflow_action(ctx);
abe529af 6819 }
bd85dac1
AZ
6820
6821 ofpbuf_uninit(&ctx->stack);
050ac423
BP
6822}
6823
f25d0cf3
BP
6824/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
6825 * into datapath actions, using 'ctx', and discards the datapath actions. */
050ac423
BP
6826static void
6827xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
f25d0cf3
BP
6828 const struct ofpact *ofpacts,
6829 size_t ofpacts_len)
050ac423
BP
6830{
6831 uint64_t odp_actions_stub[1024 / 8];
6832 struct ofpbuf odp_actions;
abe529af 6833
050ac423 6834 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
f25d0cf3 6835 xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
050ac423 6836 ofpbuf_uninit(&odp_actions);
abe529af 6837}
479df176
BP
6838
6839static void
6840xlate_report(struct action_xlate_ctx *ctx, const char *s)
6841{
6842 if (ctx->report_hook) {
6843 ctx->report_hook(ctx, s);
6844 }
6845}
abe529af
BP
6846\f
6847/* OFPP_NORMAL implementation. */
6848
abe529af
BP
6849static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
6850
ecac4ebf
BP
6851/* Given 'vid', the VID obtained from the 802.1Q header that was received as
6852 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
6853 * the bundle on which the packet was received, returns the VLAN to which the
6854 * packet belongs.
6855 *
6856 * Both 'vid' and the return value are in the range 0...4095. */
6857static uint16_t
6858input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
6859{
6860 switch (in_bundle->vlan_mode) {
6861 case PORT_VLAN_ACCESS:
6862 return in_bundle->vlan;
6863
6864
6865 case PORT_VLAN_TRUNK:
6866 return vid;
6867
6868 case PORT_VLAN_NATIVE_UNTAGGED:
6869 case PORT_VLAN_NATIVE_TAGGED:
6870 return vid ? vid : in_bundle->vlan;
6871
6872 default:
6873 NOT_REACHED();
6874 }
6875}
6876
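/* Worked example for input_vid_to_vlan() (illustrative): on an access
 * bundle with vlan 10, every admitted packet belongs to VLAN 10 regardless
 * of its tag; on a trunk bundle, a packet tagged with VID 20 belongs to
 * VLAN 20; on a native-tagged or native-untagged bundle with vlan 10, an
 * untagged packet (vid == 0) belongs to VLAN 10 and a packet tagged with
 * VID 20 belongs to VLAN 20. */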
5da5ec37
BP
6877/* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
6878 * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs
6879 * a warning.
6880 *
6881 * 'vid' should be the VID obtained from the 802.1Q header that was received as
6882 * part of a packet (specify 0 if there was no 802.1Q header), in the range
6883 * 0...4095. */
6884static bool
6885input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
6886{
33158a18
JP
6887 /* Allow any VID on the OFPP_NONE port. */
6888 if (in_bundle == &ofpp_none_bundle) {
6889 return true;
6890 }
6891
5da5ec37
BP
6892 switch (in_bundle->vlan_mode) {
6893 case PORT_VLAN_ACCESS:
6894 if (vid) {
6895 if (warn) {
6896 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6897 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
6898 "packet received on port %s configured as VLAN "
6899 "%"PRIu16" access port",
6900 in_bundle->ofproto->up.name, vid,
6901 in_bundle->name, in_bundle->vlan);
6902 }
6903 return false;
6904 }
6905 return true;
6906
6907 case PORT_VLAN_NATIVE_UNTAGGED:
6908 case PORT_VLAN_NATIVE_TAGGED:
6909 if (!vid) {
6910 /* Port must always carry its native VLAN. */
6911 return true;
6912 }
6913 /* Fall through. */
6914 case PORT_VLAN_TRUNK:
6915 if (!ofbundle_includes_vlan(in_bundle, vid)) {
6916 if (warn) {
6917 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
6918 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
6919 "received on port %s not configured for trunking "
6920 "VLAN %"PRIu16,
6921 in_bundle->ofproto->up.name, vid,
6922 in_bundle->name, vid);
6923 }
6924 return false;
6925 }
6926 return true;
6927
6928 default:
6929 NOT_REACHED();
6930 }
6931
6932}
6933
ecac4ebf
BP
6934/* Given 'vlan', the VLAN that a packet belongs to, and
6935 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
6936 * that should be included in the 802.1Q header. (If the return value is 0,
6937 * then the 802.1Q header should only be included in the packet if there is a
6938 * nonzero PCP.)
6939 *
6940 * Both 'vlan' and the return value are in the range 0...4095. */
6941static uint16_t
6942output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
6943{
6944 switch (out_bundle->vlan_mode) {
6945 case PORT_VLAN_ACCESS:
6946 return 0;
6947
6948 case PORT_VLAN_TRUNK:
6949 case PORT_VLAN_NATIVE_TAGGED:
6950 return vlan;
6951
6952 case PORT_VLAN_NATIVE_UNTAGGED:
6953 return vlan == out_bundle->vlan ? 0 : vlan;
6954
6955 default:
6956 NOT_REACHED();
6957 }
6958}
6959
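/* Worked example for output_vlan_to_vid() (illustrative), the inverse of
 * the input mapping above: on an access bundle, every packet leaves
 * untagged (VID 0); on a native-untagged bundle with vlan 10, a packet in
 * VLAN 10 leaves untagged while a packet in VLAN 20 leaves tagged with
 * VID 20; on a trunk or native-tagged bundle, a packet in VLAN N leaves
 * tagged with VID N. */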
395e68ce
BP
6960static void
6961output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
6962 uint16_t vlan)
abe529af 6963{
395e68ce
BP
6964 struct ofport_dpif *port;
6965 uint16_t vid;
81b1afb1 6966 ovs_be16 tci, old_tci;
ecac4ebf 6967
395e68ce
BP
6968 vid = output_vlan_to_vid(out_bundle, vlan);
6969 if (!out_bundle->bond) {
6970 port = ofbundle_get_a_port(out_bundle);
6971 } else {
6972 port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
6973 vid, &ctx->tags);
6974 if (!port) {
6975 /* No slaves enabled, so drop packet. */
6976 return;
6977 }
6978 }
abe529af 6979
81b1afb1 6980 old_tci = ctx->flow.vlan_tci;
5e9ceccd
BP
6981 tci = htons(vid);
6982 if (tci || out_bundle->use_priority_tags) {
6983 tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
6984 if (tci) {
6985 tci |= htons(VLAN_CFI);
6986 }
395e68ce 6987 }
81b1afb1 6988 ctx->flow.vlan_tci = tci;
395e68ce 6989
5e48dc2b 6990 compose_output_action(ctx, port->up.ofp_port);
81b1afb1 6991 ctx->flow.vlan_tci = old_tci;
abe529af
BP
6992}
6993
6994static int
6995mirror_mask_ffs(mirror_mask_t mask)
6996{
6997 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
6998 return ffs(mask);
6999}
7000
abe529af
BP
7001static bool
7002ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
7003{
ecac4ebf 7004 return (bundle->vlan_mode != PORT_VLAN_ACCESS
fc3d7408 7005 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
abe529af
BP
7006}
7007
7008static bool
7009ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
7010{
7011 return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
7012}
7013
7014/* Returns an arbitrary interface within 'bundle'. */
7015static struct ofport_dpif *
7016ofbundle_get_a_port(const struct ofbundle *bundle)
7017{
7018 return CONTAINER_OF(list_front(&bundle->ports),
7019 struct ofport_dpif, bundle_node);
7020}
7021
abe529af
BP
7022static bool
7023vlan_is_mirrored(const struct ofmirror *m, int vlan)
7024{
fc3d7408 7025 return !m->vlans || bitmap_is_set(m->vlans, vlan);
abe529af
BP
7026}
7027
7028static void
c06bba01 7029add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
abe529af
BP
7030{
7031 struct ofproto_dpif *ofproto = ctx->ofproto;
7032 mirror_mask_t mirrors;
c06bba01
JP
7033 struct ofbundle *in_bundle;
7034 uint16_t vlan;
7035 uint16_t vid;
7036 const struct nlattr *a;
7037 size_t left;
7038
3581c12c 7039 in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
70c2fd56 7040 ctx->packet != NULL, NULL);
3581c12c 7041 if (!in_bundle) {
c06bba01
JP
7042 return;
7043 }
c06bba01
JP
7044 mirrors = in_bundle->src_mirrors;
7045
7046 /* Drop frames on bundles reserved for mirroring. */
7047 if (in_bundle->mirror_out) {
7048 if (ctx->packet != NULL) {
7049 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7050 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
7051 "%s, which is reserved exclusively for mirroring",
7052 ctx->ofproto->up.name, in_bundle->name);
7053 }
7054 return;
7055 }
7056
7057 /* Check VLAN. */
7058 vid = vlan_tci_to_vid(orig_flow->vlan_tci);
7059 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
7060 return;
7061 }
7062 vlan = input_vid_to_vlan(in_bundle, vid);
7063
7064 /* Look at the output ports to check for destination selections. */
7065
7066 NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
7067 ctx->odp_actions->size) {
7068 enum ovs_action_attr type = nl_attr_type(a);
7069 struct ofport_dpif *ofport;
7070
7071 if (type != OVS_ACTION_ATTR_OUTPUT) {
7072 continue;
7073 }
7074
7075 ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
521472bc
BP
7076 if (ofport && ofport->bundle) {
7077 mirrors |= ofport->bundle->dst_mirrors;
7078 }
c06bba01 7079 }
abe529af
BP
7080
7081 if (!mirrors) {
7082 return;
7083 }
7084
c06bba01
JP
7085 /* Restore the original packet before adding the mirror actions. */
7086 ctx->flow = *orig_flow;
7087
9ba15e2a
BP
7088 while (mirrors) {
7089 struct ofmirror *m;
9ba15e2a
BP
7090
7091 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
7092
7093 if (!vlan_is_mirrored(m, vlan)) {
8472a3ce 7094 mirrors = zero_rightmost_1bit(mirrors);
9ba15e2a
BP
7095 continue;
7096 }
7097
7098 mirrors &= ~m->dup_mirrors;
9d24de3b 7099 ctx->mirrors |= m->dup_mirrors;
9ba15e2a 7100 if (m->out) {
395e68ce 7101 output_normal(ctx, m->out, vlan);
614ec445
EJ
7102 } else if (vlan != m->out_vlan
7103 && !eth_addr_is_reserved(orig_flow->dl_dst)) {
9ba15e2a
BP
7104 struct ofbundle *bundle;
7105
7106 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
7107 if (ofbundle_includes_vlan(bundle, m->out_vlan)
395e68ce
BP
7108 && !bundle->mirror_out) {
7109 output_normal(ctx, bundle, m->out_vlan);
abe529af
BP
7110 }
7111 }
7112 }
abe529af
BP
7113 }
7114}
7115
9d24de3b
JP
7116static void
7117update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
7118 uint64_t packets, uint64_t bytes)
7119{
7120 if (!mirrors) {
7121 return;
7122 }
7123
8472a3ce 7124 for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
9d24de3b
JP
7125 struct ofmirror *m;
7126
7127 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
7128
7129 if (!m) {
7130 /* In normal circumstances 'm' will not be NULL. However,
7131 * if mirrors are reconfigured, we can temporarily get out
7132 * of sync in facet_revalidate(). We could "correct" the
7133 * mirror list before reaching here, but doing that would
7134 * not properly account the traffic stats we've currently
7135 * accumulated for previous mirror configuration. */
7136 continue;
7137 }
7138
7139 m->packet_count += packets;
7140 m->byte_count += bytes;
7141 }
7142}
7143
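/* Worked example of the mirror-mask iteration above (illustrative): with
 * 'mirrors' == 0x5, mirror_mask_ffs() returns 1, so ofproto->mirrors[0] is
 * updated first; zero_rightmost_1bit() then leaves 0x4, so
 * ofproto->mirrors[2] is updated next, and the loop terminates once the
 * mask reaches 0. */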
abe529af
BP
7144/* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
7145 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
7146 * indicate this; newer upstream kernels use gratuitous ARP requests. */
7147static bool
7148is_gratuitous_arp(const struct flow *flow)
7149{
7150 return (flow->dl_type == htons(ETH_TYPE_ARP)
7151 && eth_addr_is_broadcast(flow->dl_dst)
7152 && (flow->nw_proto == ARP_OP_REPLY
7153 || (flow->nw_proto == ARP_OP_REQUEST
7154 && flow->nw_src == flow->nw_dst)));
7155}
7156
7157static void
7158update_learning_table(struct ofproto_dpif *ofproto,
7159 const struct flow *flow, int vlan,
7160 struct ofbundle *in_bundle)
7161{
7162 struct mac_entry *mac;
7163
33158a18
JP
7164 /* Don't learn the OFPP_NONE port. */
7165 if (in_bundle == &ofpp_none_bundle) {
7166 return;
7167 }
7168
abe529af
BP
7169 if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
7170 return;
7171 }
7172
7173 mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
7174 if (is_gratuitous_arp(flow)) {
7175 /* We don't want to learn from gratuitous ARP packets that are
7176 * reflected back over bond slaves so we lock the learning table. */
7177 if (!in_bundle->bond) {
7178 mac_entry_set_grat_arp_lock(mac);
7179 } else if (mac_entry_is_grat_arp_locked(mac)) {
7180 return;
7181 }
7182 }
7183
7184 if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
7185 /* The log messages here could actually be useful in debugging,
7186 * so keep the rate limit relatively high. */
7187 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
7188 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
7189 "on port %s in VLAN %d",
7190 ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
7191 in_bundle->name, vlan);
7192
7193 mac->port.p = in_bundle;
2cc3c58e 7194 tag_set_add(&ofproto->backer->revalidate_set,
abe529af
BP
7195 mac_learning_changed(ofproto->ml, mac));
7196 }
7197}
7198
3581c12c 7199static struct ofbundle *
4acbc98d
SH
7200lookup_input_bundle(const struct ofproto_dpif *ofproto, uint16_t in_port,
7201 bool warn, struct ofport_dpif **in_ofportp)
395e68ce
BP
7202{
7203 struct ofport_dpif *ofport;
7204
7205 /* Find the port and bundle for the received packet. */
7206 ofport = get_ofp_port(ofproto, in_port);
70c2fd56
BP
7207 if (in_ofportp) {
7208 *in_ofportp = ofport;
7209 }
395e68ce 7210 if (ofport && ofport->bundle) {
3581c12c 7211 return ofport->bundle;
395e68ce
BP
7212 }
7213
70c2fd56
BP
7214 /* Special-case OFPP_NONE, which a controller may use as the ingress
7215 * port for traffic that it is sourcing. */
7216 if (in_port == OFPP_NONE) {
7217 return &ofpp_none_bundle;
7218 }
7219
395e68ce
BP
7220 /* Odd. A few possible reasons here:
7221 *
7222 * - We deleted a port but there are still a few packets queued up
7223 * from it.
7224 *
7225 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
7226 * we don't know about.
7227 *
7228 * - The ofproto client didn't configure the port as part of a bundle.
6b803ddc
EJ
7229 * This is particularly likely to happen if a packet was received on the
7230 * port after it was created, but before the client had a chance to
7231 * configure its bundle.
395e68ce
BP
7232 */
7233 if (warn) {
7234 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7235
7236 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
7237 "port %"PRIu16, ofproto->up.name, in_port);
7238 }
7239 return NULL;
7240}
7241
5da5ec37 7242/* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
abe529af
BP
7243 * dropped. Returns true if they may be forwarded, false if they should be
7244 * dropped.
7245 *
395e68ce
BP
7246 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
7247 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
abe529af 7248 *
395e68ce
BP
7249 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
7250 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
7251 * checked by input_vid_is_valid().
abe529af
BP
7252 *
7253 * May also add tags to 'ctx->tags', although the current implementation only
7254 * so in one special case.
7255 */
7256static bool
479df176
BP
7257is_admissible(struct action_xlate_ctx *ctx, struct ofport_dpif *in_port,
7258 uint16_t vlan)
abe529af 7259{
479df176
BP
7260 struct ofproto_dpif *ofproto = ctx->ofproto;
7261 struct flow *flow = &ctx->flow;
395e68ce 7262 struct ofbundle *in_bundle = in_port->bundle;
abe529af 7263
395e68ce
BP
7264 /* Drop frames for reserved multicast addresses
7265 * only if forward_bpdu option is absent. */
614ec445 7266 if (!ofproto->up.forward_bpdu && eth_addr_is_reserved(flow->dl_dst)) {
479df176 7267 xlate_report(ctx, "packet has reserved destination MAC, dropping");
abe529af
BP
7268 return false;
7269 }
7270
abe529af
BP
7271 if (in_bundle->bond) {
7272 struct mac_entry *mac;
7273
7274 switch (bond_check_admissibility(in_bundle->bond, in_port,
479df176 7275 flow->dl_dst, &ctx->tags)) {
abe529af
BP
7276 case BV_ACCEPT:
7277 break;
7278
7279 case BV_DROP:
479df176 7280 xlate_report(ctx, "bonding refused admissibility, dropping");
abe529af
BP
7281 return false;
7282
7283 case BV_DROP_IF_MOVED:
7284 mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
7285 if (mac && mac->port.p != in_bundle &&
7286 (!is_gratuitous_arp(flow)
7287 || mac_entry_is_grat_arp_locked(mac))) {
479df176
BP
7288 xlate_report(ctx, "SLB bond thinks this packet looped back, "
7289 "dropping");
abe529af
BP
7290 return false;
7291 }
7292 break;
7293 }
7294 }
7295
7296 return true;
7297}
7298
4cd78906 7299static void
abe529af
BP
7300xlate_normal(struct action_xlate_ctx *ctx)
7301{
395e68ce 7302 struct ofport_dpif *in_port;
abe529af 7303 struct ofbundle *in_bundle;
abe529af 7304 struct mac_entry *mac;
395e68ce
BP
7305 uint16_t vlan;
7306 uint16_t vid;
abe529af 7307
75a75043
BP
7308 ctx->has_normal = true;
7309
3581c12c 7310 in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
70c2fd56 7311 ctx->packet != NULL, &in_port);
3581c12c 7312 if (!in_bundle) {
479df176 7313 xlate_report(ctx, "no input bundle, dropping");
395e68ce
BP
7314 return;
7315 }
3581c12c 7316
395e68ce
BP
7317 /* Drop malformed frames. */
7318 if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
7319 !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
7320 if (ctx->packet != NULL) {
7321 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7322 VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
7323 "VLAN tag received on port %s",
7324 ctx->ofproto->up.name, in_bundle->name);
7325 }
479df176 7326 xlate_report(ctx, "partial VLAN tag, dropping");
395e68ce
BP
7327 return;
7328 }
7329
7330 /* Drop frames on bundles reserved for mirroring. */
7331 if (in_bundle->mirror_out) {
7332 if (ctx->packet != NULL) {
7333 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
7334 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
7335 "%s, which is reserved exclusively for mirroring",
7336 ctx->ofproto->up.name, in_bundle->name);
7337 }
479df176 7338 xlate_report(ctx, "input port is mirror output port, dropping");
395e68ce
BP
7339 return;
7340 }
7341
7342 /* Check VLAN. */
7343 vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
7344 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
479df176 7345 xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
395e68ce
BP
7346 return;
7347 }
7348 vlan = input_vid_to_vlan(in_bundle, vid);
7349
7350 /* Check other admissibility requirements. */
479df176 7351 if (in_port && !is_admissible(ctx, in_port, vlan)) {
395e68ce 7352 return;
abe529af
BP
7353 }
7354
75a75043 7355 /* Learn source MAC. */
3de9590b 7356 if (ctx->may_learn) {
abe529af
BP
7357 update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
7358 }
7359
7360 /* Determine output bundle. */
7361 mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
7362 &ctx->tags);
7363 if (mac) {
c06bba01 7364 if (mac->port.p != in_bundle) {
479df176 7365 xlate_report(ctx, "forwarding to learned port");
c06bba01 7366 output_normal(ctx, mac->port.p, vlan);
479df176
BP
7367 } else {
7368 xlate_report(ctx, "learned port is input port, dropping");
c06bba01 7369 }
abe529af 7370 } else {
c06bba01 7371 struct ofbundle *bundle;
abe529af 7372
479df176 7373 xlate_report(ctx, "no learned MAC for destination, flooding");
c06bba01
JP
7374 HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
7375 if (bundle != in_bundle
7376 && ofbundle_includes_vlan(bundle, vlan)
7377 && bundle->floodable
7378 && !bundle->mirror_out) {
7379 output_normal(ctx, bundle, vlan);
7380 }
7381 }
7382 ctx->nf_output_iface = NF_OUT_FLOOD;
abe529af 7383 }
abe529af
BP
7384}
7385\f
54a9cbc9
BP
7386/* Optimized flow revalidation.
7387 *
7388 * It's a difficult problem, in general, to tell which facets need to have
7389 * their actions recalculated whenever the OpenFlow flow table changes. We
7390 * don't try to solve that general problem: for most kinds of OpenFlow flow
7391 * table changes, we recalculate the actions for every facet. This is
7392 * relatively expensive, but it's good enough if the OpenFlow flow table
7393 * doesn't change very often.
7394 *
7395 * However, we can expect one particular kind of OpenFlow flow table change to
7396 * happen frequently: changes caused by MAC learning. To avoid wasting a lot
7397 * of CPU on revalidating every facet whenever MAC learning modifies the flow
7398 * table, we add a special case that applies to flow tables in which every rule
7399 * has the same form (that is, the same wildcards), except that the table is
7400 * also allowed to have a single "catch-all" flow that matches all packets. We
7401 * optimize this case by tagging all of the facets that resubmit into the table
7402 * and invalidating the same tag whenever a flow changes in that table. The
7403 * end result is that we revalidate just the facets that need it (and sometimes
7404 * a few more, but not all of the facets or even all of the facets that
7405 * resubmit to the table modified by MAC learning). */
7406
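/* Concrete example (illustrative): a table populated entirely by the
 * "learn" action tends to contain many rules that all match on the same
 * fields, e.g. VLAN and Ethernet destination, plus at most one catch-all
 * miss rule.  Such a table qualifies for the special case above, so a MAC
 * learning change invalidates only the tag computed from the modified
 * rule's match, and hence only the facets that resubmitted into the table
 * with a matching hash, rather than every facet in the switch. */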
5cb7a798 7407/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
54a9cbc9 7408 * into an OpenFlow table with the given 'basis'. */
822d9414 7409static tag_type
5cb7a798 7410rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
54a9cbc9
BP
7411 uint32_t secret)
7412{
5cb7a798 7413 if (minimask_is_catchall(mask)) {
54a9cbc9
BP
7414 return 0;
7415 } else {
5cb7a798
BP
7416 uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
7417 return tag_create_deterministic(hash);
54a9cbc9
BP
7418 }
7419}
7420
7421/* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
7422 * taggability of that table.
7423 *
7424 * This function must be called after *each* change to a flow table. If you
7425 * skip calling it on some changes then the pointer comparisons at the end can
7426 * be invalid if you get unlucky. For example, if a flow removal causes a
7427 * cls_table to be destroyed and then a flow insertion causes a cls_table with
7428 * different wildcards to be created with the same address, then this function
7429 * will incorrectly skip revalidation. */
7430static void
7431table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
7432{
7433 struct table_dpif *table = &ofproto->tables[table_id];
d0918789 7434 const struct oftable *oftable = &ofproto->up.tables[table_id];
54a9cbc9
BP
7435 struct cls_table *catchall, *other;
7436 struct cls_table *t;
7437
7438 catchall = other = NULL;
7439
d0918789 7440 switch (hmap_count(&oftable->cls.tables)) {
54a9cbc9
BP
7441 case 0:
7442 /* We could tag this OpenFlow table but it would make the logic a
7443 * little harder and it's a corner case that doesn't seem worth it
7444 * yet. */
7445 break;
7446
7447 case 1:
7448 case 2:
d0918789 7449 HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
54a9cbc9
BP
7450 if (cls_table_is_catchall(t)) {
7451 catchall = t;
7452 } else if (!other) {
7453 other = t;
7454 } else {
7455 /* Indicate that we can't tag this by setting both tables to
7456 * NULL. (We know that 'catchall' is already NULL.) */
7457 other = NULL;
7458 }
7459 }
7460 break;
7461
7462 default:
7463 /* Can't tag this table. */
7464 break;
7465 }
7466
7467 if (table->catchall_table != catchall || table->other_table != other) {
7468 table->catchall_table = catchall;
7469 table->other_table = other;
2cc3c58e 7470 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
54a9cbc9
BP
7471 }
7472}
7473
7474/* Given 'rule' that has changed in some way (either it is a rule being
7475 * inserted, a rule being deleted, or a rule whose actions are being
7476 * modified), marks facets for revalidation to ensure that packets will be
7477 * forwarded correctly according to the new state of the flow table.
7478 *
7479 * This function must be called after *each* change to a flow table. See
7480 * the comment on table_update_taggable() for more information. */
7481static void
7482rule_invalidate(const struct rule_dpif *rule)
7483{
7484 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
7485
7486 table_update_taggable(ofproto, rule->up.table_id);
7487
2cc3c58e 7488 if (!ofproto->backer->need_revalidate) {
54a9cbc9
BP
7489 struct table_dpif *table = &ofproto->tables[rule->up.table_id];
7490
7491 if (table->other_table && rule->tag) {
2cc3c58e 7492 tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
54a9cbc9 7493 } else {
2cc3c58e 7494 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
54a9cbc9
BP
7495 }
7496 }
7497}
7498\f
abe529af 7499static bool
7257b535
BP
7500set_frag_handling(struct ofproto *ofproto_,
7501 enum ofp_config_flags frag_handling)
abe529af
BP
7502{
7503 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7257b535 7504 if (frag_handling != OFPC_FRAG_REASM) {
2cc3c58e 7505 ofproto->backer->need_revalidate = REV_RECONFIGURE;
7257b535
BP
7506 return true;
7507 } else {
7508 return false;
7509 }
abe529af
BP
7510}
7511
90bf1e07 7512static enum ofperr
abe529af
BP
7513packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
7514 const struct flow *flow,
f25d0cf3 7515 const struct ofpact *ofpacts, size_t ofpacts_len)
abe529af
BP
7516{
7517 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
14f94f9a 7518 struct initial_vals initial_vals;
548de4dd
BP
7519 struct odputil_keybuf keybuf;
7520 struct dpif_flow_stats stats;
abe529af 7521
548de4dd 7522 struct ofpbuf key;
112bc5f4 7523
548de4dd
BP
7524 struct action_xlate_ctx ctx;
7525 uint64_t odp_actions_stub[1024 / 8];
7526 struct ofpbuf odp_actions;
80e5eed9 7527
548de4dd 7528 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
e1b1d06a
JP
7529 odp_flow_key_from_flow(&key, flow,
7530 ofp_port_to_odp_port(ofproto, flow->in_port));
050ac423 7531
548de4dd 7532 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
abe529af 7533
14f94f9a 7534 initial_vals.vlan_tci = flow->vlan_tci;
c3f6c502 7535 initial_vals.tunnel_ip_tos = 0;
14f94f9a 7536 action_xlate_ctx_init(&ctx, ofproto, flow, &initial_vals, NULL,
548de4dd
BP
7537 packet_get_tcp_flags(packet, flow), packet);
7538 ctx.resubmit_stats = &stats;
2284188b 7539
548de4dd
BP
7540 ofpbuf_use_stub(&odp_actions,
7541 odp_actions_stub, sizeof odp_actions_stub);
7542 xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
acf60855 7543 dpif_execute(ofproto->backer->dpif, key.data, key.size,
548de4dd
BP
7544 odp_actions.data, odp_actions.size, packet);
7545 ofpbuf_uninit(&odp_actions);
2284188b 7546
548de4dd 7547 return 0;
abe529af 7548}
6fca1ffb
BP
7549\f
7550/* NetFlow. */
7551
7552static int
7553set_netflow(struct ofproto *ofproto_,
7554 const struct netflow_options *netflow_options)
7555{
7556 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7557
7558 if (netflow_options) {
7559 if (!ofproto->netflow) {
7560 ofproto->netflow = netflow_create();
7561 }
7562 return netflow_set_options(ofproto->netflow, netflow_options);
7563 } else {
7564 netflow_destroy(ofproto->netflow);
7565 ofproto->netflow = NULL;
7566 return 0;
7567 }
7568}
abe529af
BP
7569
7570static void
7571get_netflow_ids(const struct ofproto *ofproto_,
7572 uint8_t *engine_type, uint8_t *engine_id)
7573{
7574 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
7575
acf60855 7576 dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
abe529af 7577}
6fca1ffb
BP
7578
7579static void
7580send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
7581{
7582 if (!facet_is_controller_flow(facet) &&
7583 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
b0f7b9b5 7584 struct subfacet *subfacet;
6fca1ffb
BP
7585 struct ofexpired expired;
7586
b0f7b9b5 7587 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6a7e895f 7588 if (subfacet->path == SF_FAST_PATH) {
b0f7b9b5 7589 struct dpif_flow_stats stats;
6fca1ffb 7590
6a7e895f 7591 subfacet_reinstall(subfacet, &stats);
15baa734 7592 subfacet_update_stats(subfacet, &stats);
b0f7b9b5 7593 }
6fca1ffb
BP
7594 }
7595
7596 expired.flow = facet->flow;
7597 expired.packet_count = facet->packet_count;
7598 expired.byte_count = facet->byte_count;
7599 expired.used = facet->used;
7600 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
7601 }
7602}
7603
7604static void
7605send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
7606{
7607 struct facet *facet;
7608
7609 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
7610 send_active_timeout(ofproto, facet);
7611 }
7612}
abe529af
BP
7613\f
7614static struct ofproto_dpif *
7615ofproto_dpif_lookup(const char *name)
7616{
b44a10b7
BP
7617 struct ofproto_dpif *ofproto;
7618
7619 HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
7620 hash_string(name, 0), &all_ofproto_dpifs) {
7621 if (!strcmp(ofproto->up.name, name)) {
7622 return ofproto;
7623 }
7624 }
7625 return NULL;
abe529af
BP
7626}
7627
f0a3aa2e 7628static void
96e466a3 7629ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
0e15264f 7630 const char *argv[], void *aux OVS_UNUSED)
f0a3aa2e 7631{
490df1ef 7632 struct ofproto_dpif *ofproto;
f0a3aa2e 7633
96e466a3
EJ
7634 if (argc > 1) {
7635 ofproto = ofproto_dpif_lookup(argv[1]);
7636 if (!ofproto) {
bde9f75d 7637 unixctl_command_reply_error(conn, "no such bridge");
96e466a3
EJ
7638 return;
7639 }
2cc3c58e 7640 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
96e466a3
EJ
7641 } else {
7642 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2cc3c58e 7643 mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
96e466a3 7644 }
f0a3aa2e 7645 }
f0a3aa2e 7646
bde9f75d 7647 unixctl_command_reply(conn, "table successfully flushed");
f0a3aa2e
AA
7648}
7649
abe529af 7650static void
0e15264f
BP
7651ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
7652 const char *argv[], void *aux OVS_UNUSED)
abe529af
BP
7653{
7654 struct ds ds = DS_EMPTY_INITIALIZER;
7655 const struct ofproto_dpif *ofproto;
7656 const struct mac_entry *e;
7657
0e15264f 7658 ofproto = ofproto_dpif_lookup(argv[1]);
abe529af 7659 if (!ofproto) {
bde9f75d 7660 unixctl_command_reply_error(conn, "no such bridge");
abe529af
BP
7661 return;
7662 }
7663
7664 ds_put_cstr(&ds, " port VLAN MAC Age\n");
7665 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
7666 struct ofbundle *bundle = e->port.p;
7667 ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
7668 ofbundle_get_a_port(bundle)->odp_port,
e764773c
BP
7669 e->vlan, ETH_ADDR_ARGS(e->mac),
7670 mac_entry_age(ofproto->ml, e));
abe529af 7671 }
bde9f75d 7672 unixctl_command_reply(conn, ds_cstr(&ds));
abe529af
BP
7673 ds_destroy(&ds);
7674}
7675
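/* Example (illustrative): "ovs-appctl fdb/show br0" renders the MAC
 * learning table of bridge br0 in the format built above, e.g.:
 *
 *      port  VLAN  MAC                Age
 *         1     0  50:54:00:00:00:05    5
 */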
6a6455e5 7676struct trace_ctx {
abe529af
BP
7677 struct action_xlate_ctx ctx;
7678 struct flow flow;
7679 struct ds *result;
7680};
7681
7682static void
29901626
BP
7683trace_format_rule(struct ds *result, uint8_t table_id, int level,
7684 const struct rule_dpif *rule)
abe529af
BP
7685{
7686 ds_put_char_multiple(result, '\t', level);
7687 if (!rule) {
7688 ds_put_cstr(result, "No match\n");
7689 return;
7690 }
7691
29901626
BP
7692 ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
7693 table_id, ntohll(rule->up.flow_cookie));
79feb7df 7694 cls_rule_format(&rule->up.cr, result);
abe529af
BP
7695 ds_put_char(result, '\n');
7696
7697 ds_put_char_multiple(result, '\t', level);
7698 ds_put_cstr(result, "OpenFlow ");
f25d0cf3 7699 ofpacts_format(rule->up.ofpacts, rule->up.ofpacts_len, result);
abe529af
BP
7700 ds_put_char(result, '\n');
7701}
7702
7703static void
7704trace_format_flow(struct ds *result, int level, const char *title,
6a6455e5 7705 struct trace_ctx *trace)
abe529af
BP
7706{
7707 ds_put_char_multiple(result, '\t', level);
7708 ds_put_format(result, "%s: ", title);
7709 if (flow_equal(&trace->ctx.flow, &trace->flow)) {
7710 ds_put_cstr(result, "unchanged");
7711 } else {
7712 flow_format(result, &trace->ctx.flow);
7713 trace->flow = trace->ctx.flow;
7714 }
7715 ds_put_char(result, '\n');
7716}
7717
eb9e1c26
EJ
7718static void
7719trace_format_regs(struct ds *result, int level, const char *title,
6a6455e5 7720 struct trace_ctx *trace)
eb9e1c26
EJ
7721{
7722 size_t i;
7723
7724 ds_put_char_multiple(result, '\t', level);
7725 ds_put_format(result, "%s:", title);
7726 for (i = 0; i < FLOW_N_REGS; i++) {
7727 ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
7728 }
7729 ds_put_char(result, '\n');
7730}
7731
1ed8d352
EJ
7732static void
7733trace_format_odp(struct ds *result, int level, const char *title,
6a6455e5 7734 struct trace_ctx *trace)
1ed8d352
EJ
7735{
7736 struct ofpbuf *odp_actions = trace->ctx.odp_actions;
7737
7738 ds_put_char_multiple(result, '\t', level);
7739 ds_put_format(result, "%s: ", title);
7740 format_odp_actions(result, odp_actions->data, odp_actions->size);
7741 ds_put_char(result, '\n');
7742}
7743
abe529af
BP
7744static void
7745trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
7746{
6a6455e5 7747 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
abe529af
BP
7748 struct ds *result = trace->result;
7749
7750 ds_put_char(result, '\n');
7751 trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
eb9e1c26 7752 trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
1ed8d352 7753 trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
29901626 7754 trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
abe529af
BP
7755}
7756
479df176
BP
7757static void
7758trace_report(struct action_xlate_ctx *ctx, const char *s)
7759{
7760 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
7761 struct ds *result = trace->result;
7762
7763 ds_put_char_multiple(result, '\t', ctx->recurse);
7764 ds_put_cstr(result, s);
7765 ds_put_char(result, '\n');
7766}
7767
abe529af 7768static void
0e15264f 7769ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
abe529af
BP
7770 void *aux OVS_UNUSED)
7771{
0e15264f 7772 const char *dpname = argv[1];
abe529af 7773 struct ofproto_dpif *ofproto;
876b0e1c
BP
7774 struct ofpbuf odp_key;
7775 struct ofpbuf *packet;
14f94f9a 7776 struct initial_vals initial_vals;
abe529af
BP
7777 struct ds result;
7778 struct flow flow;
abe529af
BP
7779 char *s;
7780
876b0e1c
BP
7781 packet = NULL;
7782 ofpbuf_init(&odp_key, 0);
abe529af
BP
7783 ds_init(&result);
7784
e84173dc
BP
7785 ofproto = ofproto_dpif_lookup(dpname);
7786 if (!ofproto) {
bde9f75d
EJ
7787 unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
7788 "for help)");
e84173dc
BP
7789 goto exit;
7790 }
0e15264f 7791 if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
8b3b8dd1 7792 /* ofproto/trace dpname flow [-generate] */
0e15264f
BP
7793 const char *flow_s = argv[2];
7794 const char *generate_s = argv[3];
876b0e1c 7795
31a19d69
BP
7796 /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
7797 * flow. We guess which type it is based on whether 'flow_s' contains
7798 * an '(', since a datapath flow always contains '(' but an
7799 * OpenFlow-like flow should not (in fact it's allowed but I believe
7800 * that's not documented anywhere).
7801 *
7802 * An alternative would be to try to parse 'flow_s' both ways, but then
7803 * it would be tricky giving a sensible error message. After all, do
7804 * you just say "syntax error" or do you present both error messages?
7805 * Both choices seem lousy. */
7806 if (strchr(flow_s, '(')) {
7807 int error;
7808
7809 /* Convert string to datapath key. */
7810 ofpbuf_init(&odp_key, 0);
7811 error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
7812 if (error) {
7813 unixctl_command_reply_error(conn, "Bad flow syntax");
7814 goto exit;
7815 }
876b0e1c 7816
6d199116
BP
7817 /* The user might have specified the wrong ofproto but within the
7818 * same backer. That's OK, ofproto_receive() can find the right
7819 * one for us. */
e09ee259 7820 if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
6d199116 7821 odp_key.size, &flow, NULL, &ofproto, NULL,
14f94f9a 7822 &initial_vals)) {
31a19d69
BP
7823 unixctl_command_reply_error(conn, "Invalid flow");
7824 goto exit;
7825 }
6d199116 7826 ds_put_format(&result, "Bridge: %s\n", ofproto->up.name);
31a19d69
BP
7827 } else {
7828 char *error_s;
7829
7830 error_s = parse_ofp_exact_flow(&flow, argv[2]);
7831 if (error_s) {
7832 unixctl_command_reply_error(conn, error_s);
7833 free(error_s);
7834 goto exit;
7835 }
7836
14f94f9a 7837 initial_vals.vlan_tci = flow.vlan_tci;
c3f6c502 7838 initial_vals.tunnel_ip_tos = flow.tunnel.ip_tos;
876b0e1c 7839 }
8b3b8dd1
BP
7840
7841 /* Generate a packet, if requested. */
0e15264f 7842 if (generate_s) {
8b3b8dd1
BP
7843 packet = ofpbuf_new(0);
7844 flow_compose(packet, &flow);
7845 }
72e8bf28
AA
7846 } else if (argc == 7) {
7847 /* ofproto/trace dpname priority tun_id in_port mark packet */
0e15264f
BP
7848 const char *priority_s = argv[2];
7849 const char *tun_id_s = argv[3];
7850 const char *in_port_s = argv[4];
72e8bf28
AA
7851 const char *mark_s = argv[5];
7852 const char *packet_s = argv[6];
9b56fe13 7853 uint32_t in_port = atoi(in_port_s);
0e15264f
BP
7854 ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
7855 uint32_t priority = atoi(priority_s);
72e8bf28 7856 uint32_t mark = atoi(mark_s);
e22f1753 7857 const char *msg;
0e15264f 7858
e22f1753
BP
7859 msg = eth_from_hex(packet_s, &packet);
7860 if (msg) {
bde9f75d 7861 unixctl_command_reply_error(conn, msg);
876b0e1c
BP
7862 goto exit;
7863 }
7864
7865 ds_put_cstr(&result, "Packet: ");
c499c75d 7866 s = ofp_packet_to_string(packet->data, packet->size);
876b0e1c
BP
7867 ds_put_cstr(&result, s);
7868 free(s);
7869
72e8bf28 7870 flow_extract(packet, priority, mark, NULL, in_port, &flow);
296e07ac 7871 flow.tunnel.tun_id = tun_id;
14f94f9a 7872 initial_vals.vlan_tci = flow.vlan_tci;
c3f6c502 7873 initial_vals.tunnel_ip_tos = flow.tunnel.ip_tos;
876b0e1c 7874 } else {
bde9f75d 7875 unixctl_command_reply_error(conn, "Bad command syntax");
abe529af
BP
7876 goto exit;
7877 }
7878
14f94f9a 7879 ofproto_trace(ofproto, &flow, packet, &initial_vals, &result);
6a6455e5
EJ
7880 unixctl_command_reply(conn, ds_cstr(&result));
7881
7882exit:
7883 ds_destroy(&result);
7884 ofpbuf_delete(packet);
7885 ofpbuf_uninit(&odp_key);
7886}
7887
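/* Example invocations (illustrative; the authoritative syntax is the
 * "ofproto/trace" registration in ofproto_dpif_unixctl_init() below):
 *
 *     (OpenFlow-like flow; "-generate" composes a test packet)
 *     ovs-appctl ofproto/trace br0 in_port=1,dl_type=0x0806 -generate
 *
 *     (datapath flow; recognized as such because it contains '(')
 *     ovs-appctl ofproto/trace br0 'in_port(1),eth(src=50:54:00:00:00:05,
 *         dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806)'
 */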
7888static void
7889ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
14f94f9a
JP
7890 const struct ofpbuf *packet,
7891 const struct initial_vals *initial_vals, struct ds *ds)
6a6455e5
EJ
7892{
7893 struct rule_dpif *rule;
7894
7895 ds_put_cstr(ds, "Flow: ");
7896 flow_format(ds, flow);
7897 ds_put_char(ds, '\n');
abe529af 7898
c57b2226
BP
7899 rule = rule_dpif_lookup(ofproto, flow);
7900
6a6455e5 7901 trace_format_rule(ds, 0, 0, rule);
c57b2226
BP
7902 if (rule == ofproto->miss_rule) {
7903 ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
7904 } else if (rule == ofproto->no_packet_in_rule) {
7905 ds_put_cstr(ds, "\nNo match, packets dropped because "
7906 "OFPPC_NO_PACKET_IN is set on in_port.\n");
7907 }
7908
abe529af 7909 if (rule) {
050ac423
BP
7910 uint64_t odp_actions_stub[1024 / 8];
7911 struct ofpbuf odp_actions;
7912
6a6455e5 7913 struct trace_ctx trace;
0e553d9c 7914 uint8_t tcp_flags;
abe529af 7915
6a6455e5
EJ
7916 tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
7917 trace.result = ds;
7918 trace.flow = *flow;
050ac423
BP
7919 ofpbuf_use_stub(&odp_actions,
7920 odp_actions_stub, sizeof odp_actions_stub);
14f94f9a 7921 action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_vals,
0e553d9c 7922 rule, tcp_flags, packet);
abe529af 7923 trace.ctx.resubmit_hook = trace_resubmit;
479df176 7924 trace.ctx.report_hook = trace_report;
f25d0cf3 7925 xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
050ac423 7926 &odp_actions);
abe529af 7927
6a6455e5
EJ
7928 ds_put_char(ds, '\n');
7929 trace_format_flow(ds, 0, "Final flow", &trace);
7930 ds_put_cstr(ds, "Datapath actions: ");
050ac423
BP
7931 format_odp_actions(ds, odp_actions.data, odp_actions.size);
7932 ofpbuf_uninit(&odp_actions);
876b0e1c 7933
6a7e895f
BP
7934 if (trace.ctx.slow) {
7935 enum slow_path_reason slow;
7936
7937 ds_put_cstr(ds, "\nThis flow is handled by the userspace "
7938 "slow path because it:");
7939 for (slow = trace.ctx.slow; slow; ) {
7940 enum slow_path_reason bit = rightmost_1bit(slow);
7941
7942 switch (bit) {
7943 case SLOW_CFM:
7944 ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
7945 break;
7946 case SLOW_LACP:
7947 ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
7948 break;
7949 case SLOW_STP:
7950 ds_put_cstr(ds, "\n\t- Consists of STP packets.");
7951 break;
7952 case SLOW_IN_BAND:
7953 ds_put_cstr(ds, "\n\t- Needs in-band special case "
7954 "processing.");
7955 if (!packet) {
7956 ds_put_cstr(ds, "\n\t (The datapath actions are "
7957 "incomplete--for complete actions, "
7958 "please supply a packet.)");
7959 }
7960 break;
7961 case SLOW_CONTROLLER:
7962 ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
7963 "to the OpenFlow controller.");
7964 break;
7965 case SLOW_MATCH:
7966 ds_put_cstr(ds, "\n\t- Needs more specific matching "
7967 "than the datapath supports.");
7968 break;
7969 }
7970
7971 slow &= ~bit;
7972 }
7973
7974 if (trace.ctx.slow & ~SLOW_MATCH) {
7975 ds_put_cstr(ds, "\nThe datapath actions above do not reflect "
7976 "the special slow-path processing.");
876b0e1c
BP
7977 }
7978 }
abe529af 7979 }
abe529af
BP
7980}
7981
7ee20df1 7982static void
0e15264f
BP
7983ofproto_dpif_clog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
7984 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
7ee20df1
BP
7985{
7986 clogged = true;
bde9f75d 7987 unixctl_command_reply(conn, NULL);
7ee20df1
BP
7988}
7989
7990static void
0e15264f
BP
7991ofproto_dpif_unclog(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
7992 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
7ee20df1
BP
7993{
7994 clogged = false;
bde9f75d 7995 unixctl_command_reply(conn, NULL);
7ee20df1
BP
7996}
7997
6814e51f
BP
7998/* Runs a self-check of flow translations in 'ofproto'. Appends a message to
7999 * 'reply' describing the results. */
8000static void
8001ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
8002{
8003 struct facet *facet;
8004 int errors;
8005
8006 errors = 0;
8007 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
8008 if (!facet_check_consistency(facet)) {
8009 errors++;
8010 }
8011 }
8012 if (errors) {
2cc3c58e 8013 ofproto->backer->need_revalidate = REV_INCONSISTENCY;
6814e51f
BP
8014 }
8015
8016 if (errors) {
8017 ds_put_format(reply, "%s: self-check failed (%d errors)\n",
8018 ofproto->up.name, errors);
8019 } else {
8020 ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
8021 }
8022}
8023
8024static void
8025ofproto_dpif_self_check(struct unixctl_conn *conn,
8026 int argc, const char *argv[], void *aux OVS_UNUSED)
8027{
8028 struct ds reply = DS_EMPTY_INITIALIZER;
8029 struct ofproto_dpif *ofproto;
8030
8031 if (argc > 1) {
8032 ofproto = ofproto_dpif_lookup(argv[1]);
8033 if (!ofproto) {
bde9f75d
EJ
8034 unixctl_command_reply_error(conn, "Unknown ofproto (use "
8035 "ofproto/list for help)");
6814e51f
BP
8036 return;
8037 }
8038 ofproto_dpif_self_check__(ofproto, &reply);
8039 } else {
8040 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
8041 ofproto_dpif_self_check__(ofproto, &reply);
8042 }
8043 }
8044
bde9f75d 8045 unixctl_command_reply(conn, ds_cstr(&reply));
6814e51f
BP
8046 ds_destroy(&reply);
8047}
8048
27022416
JP
8049/* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
8050 * of the 'ofproto_shash' nodes. It is the responsibility of the caller
8051 * to destroy 'ofproto_shash' and free the returned value. */
8052static const struct shash_node **
8053get_ofprotos(struct shash *ofproto_shash)
8054{
8055 const struct ofproto_dpif *ofproto;
8056
8057 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
8058 char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
8059 shash_add_nocopy(ofproto_shash, name, ofproto);
8060 }
8061
8062 return shash_sort(ofproto_shash);
8063}
8064
8065static void
8066ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
8067 const char *argv[] OVS_UNUSED,
8068 void *aux OVS_UNUSED)
8069{
8070 struct ds ds = DS_EMPTY_INITIALIZER;
8071 struct shash ofproto_shash;
8072 const struct shash_node **sorted_ofprotos;
8073 int i;
8074
8075 shash_init(&ofproto_shash);
8076 sorted_ofprotos = get_ofprotos(&ofproto_shash);
8077 for (i = 0; i < shash_count(&ofproto_shash); i++) {
8078 const struct shash_node *node = sorted_ofprotos[i];
8079 ds_put_format(&ds, "%s\n", node->name);
8080 }
8081
8082 shash_destroy(&ofproto_shash);
8083 free(sorted_ofprotos);
8084
8085 unixctl_command_reply(conn, ds_cstr(&ds));
8086 ds_destroy(&ds);
8087}
8088
8089static void
8090show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
8091{
27022416
JP
8092 const struct shash_node **ports;
8093 int i;
8094
acf60855
JP
8095 ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
8096 dpif_name(ofproto->backer->dpif));
27022416 8097 ds_put_format(ds,
735d7efb
AZ
8098 "\tlookups: hit:%"PRIu64" missed:%"PRIu64"\n",
8099 ofproto->n_hit, ofproto->n_missed);
acf60855
JP
8100 ds_put_format(ds, "\tflows: %zu\n",
8101 hmap_count(&ofproto->subfacets));
27022416
JP
8102
8103 ports = shash_sort(&ofproto->up.port_by_name);
8104 for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
8105 const struct shash_node *node = ports[i];
8106 struct ofport *ofport = node->data;
8107 const char *name = netdev_get_name(ofport->netdev);
8108 const char *type = netdev_get_type(ofport->netdev);
0a740f48
EJ
8109 uint32_t odp_port;
8110
8111 ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
8112
8113 odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
8114 if (odp_port != OVSP_NONE) {
8115 ds_put_format(ds, "%"PRIu32":", odp_port);
8116 } else {
8117 ds_put_cstr(ds, "none:");
8118 }
27022416 8119
27022416
JP
8120 if (strcmp(type, "system")) {
8121 struct netdev *netdev;
8122 int error;
8123
8124 ds_put_format(ds, " (%s", type);
8125
8126 error = netdev_open(name, type, &netdev);
8127 if (!error) {
8128 struct smap config;
8129
8130 smap_init(&config);
8131 error = netdev_get_config(netdev, &config);
8132 if (!error) {
8133 const struct smap_node **nodes;
8134 size_t i;
8135
8136 nodes = smap_sort(&config);
8137 for (i = 0; i < smap_count(&config); i++) {
8138 const struct smap_node *node = nodes[i];
8139 ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
8140 node->key, node->value);
8141 }
8142 free(nodes);
8143 }
8144 smap_destroy(&config);
8145
8146 netdev_close(netdev);
8147 }
8148 ds_put_char(ds, ')');
8149 }
8150 ds_put_char(ds, '\n');
8151 }
8152 free(ports);
8153}
8154
8155static void
8156ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
8157 const char *argv[], void *aux OVS_UNUSED)
8158{
8159 struct ds ds = DS_EMPTY_INITIALIZER;
8160 const struct ofproto_dpif *ofproto;
8161
8162 if (argc > 1) {
8163 int i;
8164 for (i = 1; i < argc; i++) {
8165 ofproto = ofproto_dpif_lookup(argv[i]);
8166 if (!ofproto) {
8167 ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
8168 "for help)", argv[i]);
8169                 unixctl_command_reply_error(conn, ds_cstr(&ds));
                     ds_destroy(&ds);
8170                 return;
8171 }
8172 show_dp_format(ofproto, &ds);
8173 }
8174 } else {
8175 struct shash ofproto_shash;
8176 const struct shash_node **sorted_ofprotos;
8177 int i;
8178
8179 shash_init(&ofproto_shash);
8180 sorted_ofprotos = get_ofprotos(&ofproto_shash);
8181 for (i = 0; i < shash_count(&ofproto_shash); i++) {
8182 const struct shash_node *node = sorted_ofprotos[i];
8183 show_dp_format(node->data, &ds);
8184 }
8185
8186 shash_destroy(&ofproto_shash);
8187 free(sorted_ofprotos);
8188 }
8189
8190 unixctl_command_reply(conn, ds_cstr(&ds));
8191 ds_destroy(&ds);
8192}
8193
8194static void
8195ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
8196 int argc OVS_UNUSED, const char *argv[],
8197 void *aux OVS_UNUSED)
8198{
8199 struct ds ds = DS_EMPTY_INITIALIZER;
8200 const struct ofproto_dpif *ofproto;
8201 struct subfacet *subfacet;
8202
8203 ofproto = ofproto_dpif_lookup(argv[1]);
8204 if (!ofproto) {
8205 unixctl_command_reply_error(conn, "no such bridge");
8206 return;
8207 }
8208
8209 update_stats(ofproto->backer);
8210
27022416 8211 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
9566abf9 8212 odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
8213
8214 ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
8215 subfacet->dp_packet_count, subfacet->dp_byte_count);
8216 if (subfacet->used) {
8217 ds_put_format(&ds, "%.3fs",
8218 (time_msec() - subfacet->used) / 1000.0);
8219 } else {
8220             ds_put_cstr(&ds, "never");
8221 }
8222 if (subfacet->facet->tcp_flags) {
8223 ds_put_cstr(&ds, ", flags:");
8224 packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
8225 }
8226
8227 ds_put_cstr(&ds, ", actions:");
8228 if (subfacet->slow) {
8229 uint64_t slow_path_stub[128 / 8];
8230 const struct nlattr *actions;
8231 size_t actions_len;
8232
8233 compose_slow_path(ofproto, &subfacet->facet->flow, subfacet->slow,
8234 slow_path_stub, sizeof slow_path_stub,
8235 &actions, &actions_len);
8236 format_odp_actions(&ds, actions, actions_len);
8237 } else {
8238 format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
8239 }
8240 ds_put_char(&ds, '\n');
8241 }
8242
8243 unixctl_command_reply(conn, ds_cstr(&ds));
8244 ds_destroy(&ds);
8245}
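
/* For illustration only, each flow line emitted above looks roughly like
 * this (flow key and counters are hypothetical):
 *
 *     in_port(2),eth_type(0x0800),ipv4(src=10.0.0.1,dst=10.0.0.2),
 *     packets:12, bytes:888, used:0.320s, actions:1
 */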
8246
8247static void
8248ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
8249 int argc OVS_UNUSED, const char *argv[],
8250 void *aux OVS_UNUSED)
8251{
8252 struct ds ds = DS_EMPTY_INITIALIZER;
8253 struct ofproto_dpif *ofproto;
8254
8255 ofproto = ofproto_dpif_lookup(argv[1]);
8256 if (!ofproto) {
8257 unixctl_command_reply_error(conn, "no such bridge");
8258 return;
8259 }
8260
8261 flush(&ofproto->up);
8262
8263 unixctl_command_reply(conn, ds_cstr(&ds));
8264 ds_destroy(&ds);
8265}
8266
8267static void
8268ofproto_dpif_unixctl_init(void)
8269{
8270 static bool registered;
8271 if (registered) {
8272 return;
8273 }
8274 registered = true;
8275
8276 unixctl_command_register(
8277 "ofproto/trace",
8278 "bridge {priority tun_id in_port mark packet | odp_flow [-generate]}",
8279 2, 6, ofproto_unixctl_trace, NULL);
96e466a3 8280 unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
8281 ofproto_unixctl_fdb_flush, NULL);
8282 unixctl_command_register("fdb/show", "bridge", 1, 1,
8283 ofproto_unixctl_fdb_show, NULL);
8284 unixctl_command_register("ofproto/clog", "", 0, 0,
8285 ofproto_dpif_clog, NULL);
8286 unixctl_command_register("ofproto/unclog", "", 0, 0,
8287 ofproto_dpif_unclog, NULL);
8288 unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
8289 ofproto_dpif_self_check, NULL);
8290 unixctl_command_register("dpif/dump-dps", "", 0, 0,
8291 ofproto_unixctl_dpif_dump_dps, NULL);
8292 unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
8293 ofproto_unixctl_dpif_show, NULL);
8294 unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
8295 ofproto_unixctl_dpif_dump_flows, NULL);
8296 unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
8297 ofproto_unixctl_dpif_del_flows, NULL);
8298}
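
/* The commands registered above are invoked through ovs-appctl; for example
 * (bridge name hypothetical):
 *
 *     ovs-appctl dpif/dump-dps
 *     ovs-appctl dpif/show br0
 *     ovs-appctl dpif/dump-flows br0
 *     ovs-appctl dpif/del-flows br0
 */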
8299\f
8300/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
8301 *
8302 * This is deprecated. It is only for compatibility with broken device drivers
8303 * in old versions of Linux that do not properly support VLANs when VLAN
8304 * devices are not used. When broken device drivers are no longer in
8305 * widespread use, we will delete these interfaces. */
8306
8307static int
8308set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
8309{
8310 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
8311 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
8312
8313 if (realdev_ofp_port == ofport->realdev_ofp_port
8314 && vid == ofport->vlandev_vid) {
8315 return 0;
8316 }
8317
2cc3c58e 8318 ofproto->backer->need_revalidate = REV_RECONFIGURE;
8319
8320 if (ofport->realdev_ofp_port) {
8321 vsp_remove(ofport);
8322 }
8323 if (realdev_ofp_port && ofport->bundle) {
8324         /* vlandevs are enslaved to their realdevs, so they are not allowed
8325          * to be part of a bundle themselves. */
8326 bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
8327 }
8328
8329 ofport->realdev_ofp_port = realdev_ofp_port;
8330 ofport->vlandev_vid = vid;
8331
8332 if (realdev_ofp_port) {
8333 vsp_add(ofport, realdev_ofp_port, vid);
8334 }
8335
8336 return 0;
8337}
8338
8339static uint32_t
8340hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
8341{
8342 return hash_2words(realdev_ofp_port, vid);
8343}
8344
8345/* Returns the ODP port number of the Linux VLAN device that corresponds to
8346 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
8347 * 'ofproto'. For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
8348 * it would return the port number of eth0.9.
8349 *
8350 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
8351 * function just returns its 'realdev_odp_port' argument. */
8352static uint32_t
8353vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
8354 uint32_t realdev_odp_port, ovs_be16 vlan_tci)
8355{
8356 if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
e1b1d06a 8357 uint16_t realdev_ofp_port;
8358 int vid = vlan_tci_to_vid(vlan_tci);
8359 const struct vlan_splinter *vsp;
8360
e1b1d06a 8361 realdev_ofp_port = odp_port_to_ofp_port(ofproto, realdev_odp_port);
8362 HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
8363 hash_realdev_vid(realdev_ofp_port, vid),
8364 &ofproto->realdev_vid_map) {
8365 if (vsp->realdev_ofp_port == realdev_ofp_port
8366 && vsp->vid == vid) {
e1b1d06a 8367 return ofp_port_to_odp_port(ofproto, vsp->vlandev_ofp_port);
8368 }
8369 }
8370 }
8371 return realdev_odp_port;
8372}
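
/* A sketch of the output-side use of vsp_realdev_to_vlandev(), kept under
 * "#if 0"; the function name and port numbers are hypothetical.  If a
 * splinter maps eth0 (odp port 1) with VID 9 to eth0.9 (odp port 5), the
 * call below returns 5; without a splinter it returns 1 unchanged. */
#if 0
static uint32_t
vsp_output_example(const struct ofproto_dpif *ofproto)
{
    uint32_t out_odp_port = 1;                /* eth0, hypothetically. */
    ovs_be16 vlan_tci = htons(9 | VLAN_CFI);  /* VID 9, CFI set. */

    return vsp_realdev_to_vlandev(ofproto, out_odp_port, vlan_tci);
}
#endif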
8373
8374static struct vlan_splinter *
8375vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
8376{
8377 struct vlan_splinter *vsp;
8378
8379 HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
8380 &ofproto->vlandev_map) {
8381 if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
8382 return vsp;
8383 }
8384 }
8385
8386 return NULL;
8387}
8388
8389/* Returns the OpenFlow port number of the "real" device underlying the Linux
8390 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
8391 * VLAN VID of the Linux VLAN device in '*vid'. For example, given
8392 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
8393 * eth0 and store 9 in '*vid'.
8394 *
8395 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
8396 * VLAN device. Unless VLAN splinters are enabled, this is what this function
8397 * always does. */
8398static uint16_t
8399vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
40e05935 8400 uint16_t vlandev_ofp_port, int *vid)
8401{
8402 if (!hmap_is_empty(&ofproto->vlandev_map)) {
8403 const struct vlan_splinter *vsp;
8404
8405 vsp = vlandev_find(ofproto, vlandev_ofp_port);
8406 if (vsp) {
8407 if (vid) {
8408 *vid = vsp->vid;
8409 }
8410 return vsp->realdev_ofp_port;
8411 }
8412 }
8413 return 0;
8414}
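
/* A sketch of the reverse lookup, kept under "#if 0"; the function name is
 * hypothetical.  Given eth0.9's OpenFlow port it recovers eth0's port and
 * the VID 9; for an ordinary port it returns 0 and leaves 'vid' alone. */
#if 0
static void
vsp_input_example(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    uint16_t realdev_ofp_port;
    int vid;

    realdev_ofp_port = vsp_vlandev_to_realdev(ofproto, vlandev_ofp_port, &vid);
    if (realdev_ofp_port) {
        /* 'vlandev_ofp_port' is a VLAN device on 'realdev_ofp_port'
         * carrying VLAN 'vid'. */
    } else {
        /* Not a VLAN splinter device: the common case. */
    }
}
#endif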
8415
8416/* Given 'flow', a flow representing a packet received on 'ofproto', checks
8417 * whether 'flow->in_port' represents a Linux VLAN device. If so, changes
8418 * 'flow->in_port' to the "real" device backing the VLAN device, sets
8419 * 'flow->vlan_tci' to the VLAN VID, and returns true. Otherwise (which is
8420 * always the case unless VLAN splinters are enabled), returns false without
8421 * making any changes. */
8422static bool
8423vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
8424{
8425 uint16_t realdev;
8426 int vid;
8427
8428 realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
8429 if (!realdev) {
8430 return false;
8431 }
8432
8433 /* Cause the flow to be processed as if it came in on the real device with
8434 * the VLAN device's VLAN ID. */
8435 flow->in_port = realdev;
8436 flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
8437 return true;
8438}
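
/* A sketch of the receive path, kept under "#if 0"; the function name is
 * hypothetical.  Flows extracted from upcalls are remapped before rule
 * lookup, so the rest of the pipeline only ever sees the real device plus
 * an 802.1Q tag. */
#if 0
static void
vsp_upcall_example(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    if (vsp_adjust_flow(ofproto, flow)) {
        /* 'flow->in_port' now names the real device and 'flow->vlan_tci'
         * carries the VLAN device's VID with VLAN_CFI set. */
    }
}
#endif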
8439
8440static void
8441vsp_remove(struct ofport_dpif *port)
8442{
8443 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
8444 struct vlan_splinter *vsp;
8445
8446 vsp = vlandev_find(ofproto, port->up.ofp_port);
8447 if (vsp) {
8448 hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
8449 hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
8450 free(vsp);
8451
8452 port->realdev_ofp_port = 0;
8453 } else {
8454 VLOG_ERR("missing vlan device record");
8455 }
8456}
8457
8458static void
8459vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
8460{
8461 struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
8462
8463 if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
8464 && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
8465 == realdev_ofp_port)) {
8466 struct vlan_splinter *vsp;
8467
8468 vsp = xmalloc(sizeof *vsp);
8469 hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
8470 hash_int(port->up.ofp_port, 0));
8471 hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
8472 hash_realdev_vid(realdev_ofp_port, vid));
8473 vsp->realdev_ofp_port = realdev_ofp_port;
8474 vsp->vlandev_ofp_port = port->up.ofp_port;
8475 vsp->vid = vid;
8476
8477 port->realdev_ofp_port = realdev_ofp_port;
8478 } else {
8479 VLOG_ERR("duplicate vlan device record");
8480 }
8481}
8482
8483static uint32_t
8484ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
8485{
8486 const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
8487 return ofport ? ofport->odp_port : OVSP_NONE;
8488}
8489
8490static struct ofport_dpif *
8491odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
8492{
8493 struct ofport_dpif *port;
8494
8495 HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
8496 hash_int(odp_port, 0),
acf60855 8497 &backer->odp_to_ofport_map) {
e1b1d06a 8498 if (port->odp_port == odp_port) {
acf60855 8499 return port;
8500 }
8501 }
8502
8503 return NULL;
8504}
8505
8506static uint16_t
8507odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
8508{
8509 struct ofport_dpif *port;
8510
8511 port = odp_port_to_ofport(ofproto->backer, odp_port);
6472ba11 8512 if (port && &ofproto->up == port->up.ofproto) {
8513 return port->up.ofp_port;
8514 } else {
8515 return OFPP_NONE;
8516 }
8517}
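
/* A sketch of the two translations above, kept under "#if 0"; the function
 * name is hypothetical.  They invert each other for ports that belong to
 * 'ofproto'; each direction has its own sentinel for failure. */
#if 0
static void
port_translation_example(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
{
    uint32_t odp_port = ofp_port_to_odp_port(ofproto, ofp_port);

    if (odp_port != OVSP_NONE) {
        /* Yields 'ofp_port' again; a port of another bridge on the same
         * backer would yield OFPP_NONE instead. */
        uint16_t ofp_port_again = odp_port_to_ofp_port(ofproto, odp_port);
    }
}
#endif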
8518
8519static void
8520dpif_stats_update_hit_count(struct ofproto_dpif *ofproto, uint64_t delta)
8521{
8522 ofproto->n_hit += delta;
8523}
8524
abe529af 8525const struct ofproto_class ofproto_dpif_class = {
b0408fca 8526 init,
8527 enumerate_types,
8528 enumerate_names,
8529 del,
0aeaabc8 8530 port_open_type,
8531 type_run,
8532 type_run_fast,
8533 type_wait,
8534 alloc,
8535 construct,
8536 destruct,
8537 dealloc,
8538 run,
5fcc0d00 8539 run_fast,
abe529af 8540 wait,
0d085684 8541 get_memory_usage,
abe529af 8542 flush,
8543 get_features,
8544 get_tables,
8545 port_alloc,
8546 port_construct,
8547 port_destruct,
8548 port_dealloc,
8549 port_modified,
8550 port_reconfigured,
8551 port_query_by_name,
8552 port_add,
8553 port_del,
6527c598 8554 port_get_stats,
8555 port_dump_start,
8556 port_dump_next,
8557 port_dump_done,
8558 port_poll,
8559 port_poll_wait,
8560 port_is_lacp_current,
0ab6decf 8561 NULL, /* rule_choose_table */
8562 rule_alloc,
8563 rule_construct,
8564 rule_destruct,
8565 rule_dealloc,
8566 rule_get_stats,
8567 rule_execute,
8568 rule_modify_actions,
7257b535 8569 set_frag_handling,
8570 packet_out,
8571 set_netflow,
8572 get_netflow_ids,
8573 set_sflow,
8574 set_cfm,
9a9e3786 8575 get_cfm_status,
8576 set_stp,
8577 get_stp_status,
8578 set_stp_port,
8579 get_stp_port_status,
8b36f51e 8580 set_queues,
8581 bundle_set,
8582 bundle_remove,
8583 mirror_set,
9d24de3b 8584 mirror_get_stats,
8585 set_flood_vlans,
8586 is_mirror_output_bundle,
8402c74b 8587 forward_bpdu_changed,
c4069512 8588 set_mac_table_config,
52a90c29 8589 set_realdev,
abe529af 8590};